diff --git a/cddl/lib/pam_zfs_key/Makefile b/cddl/lib/pam_zfs_key/Makefile
index 4d3b41bf3ad0..517ca402d4da 100644
--- a/cddl/lib/pam_zfs_key/Makefile
+++ b/cddl/lib/pam_zfs_key/Makefile
@@ -1,29 +1,30 @@
.PATH: ${SRCTOP}/sys/contrib/openzfs/contrib/pam_zfs_key
.PATH: ${SRCTOP}/sys/contrib/openzfs/include
PACKAGE= zfs
LIB= pam_zfs_key
SHLIBDIR?= /usr/lib
LIBADD= zfs zfs_core nvpair uutil crypto
SRCS= pam_zfs_key.c
WARNS?= 2
CSTD= c99
CFLAGS+= -DIN_BASE
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/include
+CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libspl/include/
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libspl/include/os/freebsd
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libzpool/include
CFLAGS+= -I${SRCTOP}/cddl/compat/opensolaris/include
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/module/icp/include
CFLAGS+= -include ${SRCTOP}/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -DHAVE_ISSETUGID
CFLAGS+= -include ${SRCTOP}/sys/modules/zfs/zfs_config.h
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/include/os/freebsd/zfs
CFLAGS+= -DRUNSTATEDIR=\"/var/run\"
.include "../../lib/libpam/modules/Makefile.inc"
.include <bsd.lib.mk>
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index aba99fabbbb9..5e8f282e96c3 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,9901 +1,9906 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
* Copyright (c) 2023, 2024, Klara Inc.
* Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <getopt.h>
#include <openssl/evp.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/ddt_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
#include <sys/brt.h>
#include <sys/brt_impl.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <sys/backtrace.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <libzfs_core.h>
#include <libzdb.h>
#include "zdb.h"
extern int reference_tracking_enable;
extern int zfs_recover;
extern uint_t zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern boolean_t spa_mode_readable_spacemaps;
extern uint_t zfs_reconstruct_indirect_combinations_max;
extern uint_t zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
static uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
static zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
-static range_tree_t *mos_refd_objs;
+static zfs_range_tree_t *mos_refd_objs;
static spa_t *spa;
static objset_t *os;
static boolean_t kernel_init_done;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
static void zdb_exit(int reason);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* Refcount gets incremented to 1 when we encounter the first
* FREE entry for the svbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (i.e. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
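/*
* Worked example of the pairing logic above: for a block pointer seen
* as FREE, FREE, ALLOC, ALLOC across a sublivelist, svbr_refcnt goes
* 1 -> 2 -> 1 -> 0, at which point the node is removed from sv_pair.
* Any node still present once iteration finishes is reported as an
* unmatched FREE by sublivelist_verify_func().
*/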
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg =
BP_GET_LOGICAL_BIRTH(bp)
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare, NULL,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
(void) args;
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
- range_tree_t *mv_allocated;
+ zfs_range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb = {{{0}}};
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for the spacemap entry in the livelist entries.
* Then, look for other livelist entries that fall within the range
* of the spacemap entry, as it may have been condensed.
*/
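/*
* For instance, a single condensed FREE spacemap entry covering
* [offset, offset + size) may correspond to several smaller livelist
* ALLOC entries inside that range; the loop below walks all of them
* and flags any whose svb_allocated_txg is not later than the freeing
* txg.
*/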
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
- if (range_tree_contains(mv->mv_allocated,
+ if (zfs_range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
- range_tree_add(mv->mv_allocated,
+ zfs_range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
- if (!range_tree_contains(mv->mv_allocated,
+ if (!zfs_range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
- range_tree_remove(mv->mv_allocated,
+ zfs_range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from sv_leftover tree to the mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly, double FREEs are allowed, but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
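/*
* Concrete example of the cross check: if a livelist records an ALLOC
* for <vdev 0, offset 0x1000> at TXG 100, but the metaslab's spacemap
* or spacemap log shows that range FREED at TXG 120, the entry is
* reported by verify_livelist_allocs(). Leftover ALLOCs whose vdev is
* no longer concrete are reported at the end of
* livelist_metaslab_validate().
*/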
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
- range_seg_type_t type =
+ zfs_range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
- mv.mv_allocated = range_tree_create(NULL,
+ mv.mv_allocated = zfs_range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
- range_tree_vacate(mv.mv_allocated, NULL, NULL);
- range_tree_destroy(mv.mv_allocated);
+ zfs_range_tree_vacate(mv.mv_allocated, NULL, NULL);
+ zfs_range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we walked
* through all the metaslabs in the concrete vdevs then this means
* that we have segments in the livelists that belong to indirect
* vdevs and are marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] [-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
"\t%s -B [-e [-V] [-p <path> ...]] [-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>] <poolname>/<objset id> [<backupflags>]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>] [<poolname>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O [-K <key>] <dataset> <path>\n"
"\t%s -r [-K <key>] <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files \n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b --block-stats "
"block statistics\n");
(void) fprintf(stderr, " -B --backup "
"backup stream\n");
(void) fprintf(stderr, " -c --checksum "
"checksum all metadata (twice for all data) blocks\n");
(void) fprintf(stderr, " -C --config "
"config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d --datasets "
"dataset(s)\n");
(void) fprintf(stderr, " -D --dedup-stats "
"dedup statistics\n");
(void) fprintf(stderr, " -E --embedded-block-pointer=INTEGER\n"
" decode and display block "
"from an embedded block pointer\n");
(void) fprintf(stderr, " -h --history "
"pool history\n");
(void) fprintf(stderr, " -i --intent-logs "
"intent logs\n");
(void) fprintf(stderr, " -l --label "
"read label contents\n");
(void) fprintf(stderr, " -k --checkpointed-state "
"examine the checkpointed state of the pool\n");
(void) fprintf(stderr, " -L --disable-leak-tracking "
"disable leak tracking (do not load spacemaps)\n");
(void) fprintf(stderr, " -m --metaslabs "
"metaslabs\n");
(void) fprintf(stderr, " -M --metaslab-groups "
"metaslab groups\n");
(void) fprintf(stderr, " -O --object-lookups "
"perform object lookups by path\n");
(void) fprintf(stderr, " -r --copy-object "
"copy an object by path to file\n");
(void) fprintf(stderr, " -R --read-block "
"read and display block from a device\n");
(void) fprintf(stderr, " -s --io-stats "
"report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S --simulate-dedup "
"simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v --verbose "
"verbose (applies to all others)\n");
(void) fprintf(stderr, " -y --livelist "
"perform livelist and metaslab validation on any livelists being "
"deleted\n\n");
(void) fprintf(stderr, " The options below are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A --ignore-assertions "
"ignore assertions (-A), enable panic recovery (-AA) or both "
"(-AAA)\n");
(void) fprintf(stderr, " -e --exported "
"pool is exported/destroyed/has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F --automatic-rewind "
"attempt automatic rewind within safe range of transaction "
"groups\n");
(void) fprintf(stderr, " -G --dump-debug-msg "
"dump zfs_dbgmsg buffer before exiting\n");
(void) fprintf(stderr, " -I --inflight=INTEGER "
"specify the maximum number of checksumming I/Os "
"[default is 200]\n");
(void) fprintf(stderr, " -K --key=KEY "
"decryption key for encrypted dataset\n");
(void) fprintf(stderr, " -o --option=\"OPTION=INTEGER\" "
"set global variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p --path=PATH "
"use one or more with -e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P --parseable "
"print numbers in parseable form\n");
(void) fprintf(stderr, " -q --skip-label "
"don't print label contents\n");
(void) fprintf(stderr, " -t --txg=INTEGER "
"highest txg to use when searching for uberblocks\n");
(void) fprintf(stderr, " -T --brt-stats "
"BRT statistics\n");
(void) fprintf(stderr, " -u --uberblock "
"uberblock\n");
(void) fprintf(stderr, " -U --cachefile=PATH "
"use alternate cachefile\n");
(void) fprintf(stderr, " -V --verbatim "
"do verbatim import\n");
(void) fprintf(stderr, " -x --dump-blocks=PATH "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X --extreme-rewind "
"attempt extreme rewind (does not work with dataset)\n");
(void) fprintf(stderr, " -Y --all-reconstruction "
"attempt all reconstruction combinations for split blocks\n");
(void) fprintf(stderr, " -Z --zstd-headers "
"show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
zdb_exit(1);
}
static void
dump_debug_buffer(void)
{
ssize_t ret __attribute__((unused));
if (!dump_opt['G'])
return;
/*
* We use write() instead of printf() so that this function
* is safe to call from a signal handler.
*/
ret = write(STDERR_FILENO, "\n", 1);
zfs_dbgmsg_print(STDERR_FILENO, "zdb");
}
static void sig_handler(int signo)
{
struct sigaction action;
libspl_backtrace(STDERR_FILENO);
dump_debug_buffer();
/*
* Restore default action and re-raise signal so SIGSEGV and
* SIGABRT can trigger a core dump.
*/
action.sa_handler = SIG_DFL;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
(void) sigaction(signo, &action, NULL);
raise(signo);
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
zdb_exit(1);
}
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) size;
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) size;
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static void
zdb_nicebytes(uint64_t bytes, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)bytes);
else
zfs_nicebytes(bytes, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] == 0)
continue;
if (histo[i] > max)
max = histo[i];
if (i > maxidx)
maxidx = i;
if (i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
if (data == NULL)
kmem_free(arr, oursize);
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t *attrp = zap_attribute_long_alloc();
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, attrp) == 0;
zap_cursor_advance(&zc)) {
boolean_t key64 =
!!(zap_getflags(zc.zc_zap) & ZAP_FLAG_UINT64_KEY);
if (key64)
(void) printf("\t\t0x%010" PRIu64 "x = ",
*(uint64_t *)attrp->za_name);
else
(void) printf("\t\t%s = ", attrp->za_name);
if (attrp->za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attrp->za_num_integers *
attrp->za_integer_length, UMEM_NOFAIL);
if (key64)
(void) zap_lookup_uint64(os, object,
(const uint64_t *)attrp->za_name, 1,
attrp->za_integer_length, attrp->za_num_integers,
prop);
else
(void) zap_lookup(os, object, attrp->za_name,
attrp->za_integer_length, attrp->za_num_integers,
prop);
if (attrp->za_integer_length == 1 && !key64) {
if (strcmp(attrp->za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attrp->za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attrp->za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attrp->za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attrp->za_name,
DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attrp->za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attrp->za_num_integers; i++) {
switch (attrp->za_integer_length) {
case 1:
(void) printf("%u ",
((uint8_t *)prop)[i]);
break;
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop,
attrp->za_num_integers * attrp->za_integer_length);
}
zap_cursor_fini(&zc);
zap_attribute_free(attrp);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t *attrp = zap_attribute_alloc();
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, attrp) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attrp->za_name);
if (attrp->za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attrp->za_first_integer,
(int)ATTR_LENGTH(attrp->za_first_integer),
(int)ATTR_BSWAP(attrp->za_first_integer),
(int)ATTR_NUM(attrp->za_first_integer));
}
zap_cursor_fini(&zc);
zap_attribute_free(attrp);
}
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t *attrp = zap_attribute_alloc();
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, attrp) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attrp->za_name);
if (attrp->za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attrp->za_integer_length == 2);
layout_attrs = umem_zalloc(attrp->za_num_integers *
attrp->za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attrp->za_name,
attrp->za_integer_length,
attrp->za_num_integers, layout_attrs) == 0);
for (i = 0; i != attrp->za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attrp->za_num_integers * attrp->za_integer_length);
}
zap_cursor_fini(&zc);
zap_attribute_free(attrp);
}
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t *attrp = zap_attribute_long_alloc();
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, attrp) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attrp->za_name, ZFS_DIRENT_OBJ(attrp->za_first_integer),
typenames[ZFS_DIRENT_TYPE(attrp->za_first_integer)]);
}
zap_cursor_fini(&zc);
zap_attribute_free(attrp);
}
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
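/*
* Three kinds of words can appear in a space map: debug entries
* (PADDING when the txg is 0, otherwise an ALLOC/FREE/CONDENSE marker
* with its txg and sync pass), one-word entries (type, offset, run
* length), and two-word entries (which additionally carry a vdev id).
* The loop below decodes each form and keeps a running 'alloc' total
* that is checked against space_map_allocated() at the end.
*/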
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
- range_tree_t *rt = msp->ms_allocatable;
+ zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
- int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
+ int free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
- dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
+ dump_histogram(rt->rt_histogram, ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
- range_tree_stat_verify(msp->ms_allocatable);
+ zfs_range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
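/*
* For example, with sm_shift = 9, bucket 0 corresponds to ~512-byte
* free segments, bucket 1 to ~1 KiB segments, and so on;
* dump_histogram() is passed sm->sm_shift as the offset so the
* printed bucket labels are the log2 of the segment size class.
*/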
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa, boolean_t show_special)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
metaslab_class_t *smc = spa_special_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || (mg->mg_class != mc &&
(!show_special || mg->mg_class != smc)))
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
- dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
+ dump_histogram(mg->mg_histogram,
+ ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
- dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
+ dump_histogram(mc->mc_histogram, ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
static void
dump_ddt_entry(const ddt_t *ddt, const ddt_lightweight_entry_t *ddlwe,
uint64_t index)
{
const ddt_key_t *ddk = &ddlwe->ddlwe_key;
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_NPHYS(ddt); p++) {
const ddt_univ_phys_t *ddp = &ddlwe->ddlwe_phys;
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
if (ddt_phys_birth(ddp, v) == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, v, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu phys %d %s\n",
(u_longlong_t)index, (u_longlong_t)ddt_phys_refcnt(ddp, v),
p, blkbuf);
}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt_log(ddt_t *ddt)
{
if (ddt->ddt_version != DDT_VERSION_FDT ||
!(ddt->ddt_flags & DDT_FLAG_LOG))
return;
for (int n = 0; n < 2; n++) {
ddt_log_t *ddl = &ddt->ddt_log[n];
char flagstr[64] = {0};
if (ddl->ddl_flags > 0) {
flagstr[0] = ' ';
int c = 1;
if (ddl->ddl_flags & DDL_FLAG_FLUSHING)
c += strlcpy(&flagstr[c], " FLUSHING",
sizeof (flagstr) - c);
if (ddl->ddl_flags & DDL_FLAG_CHECKPOINT)
c += strlcpy(&flagstr[c], " CHECKPOINT",
sizeof (flagstr) - c);
if (ddl->ddl_flags &
~(DDL_FLAG_FLUSHING|DDL_FLAG_CHECKPOINT))
c += strlcpy(&flagstr[c], " UNKNOWN",
sizeof (flagstr) - c);
flagstr[1] = '[';
flagstr[c++] = ']';
}
uint64_t count = avl_numnodes(&ddl->ddl_tree);
printf(DMU_POOL_DDT_LOG ": flags=0x%02x%s; obj=%llu; "
"len=%llu; txg=%llu; entries=%llu\n",
zio_checksum_table[ddt->ddt_checksum].ci_name, n,
ddl->ddl_flags, flagstr,
(u_longlong_t)ddl->ddl_object,
(u_longlong_t)ddl->ddl_length,
(u_longlong_t)ddl->ddl_first_txg, (u_longlong_t)count);
if (ddl->ddl_flags & DDL_FLAG_CHECKPOINT) {
const ddt_key_t *ddk = &ddl->ddl_checkpoint;
printf(" checkpoint: "
"%016llx:%016llx:%016llx:%016llx:%016llx\n",
(u_longlong_t)ddk->ddk_cksum.zc_word[0],
(u_longlong_t)ddk->ddk_cksum.zc_word[1],
(u_longlong_t)ddk->ddk_cksum.zc_word[2],
(u_longlong_t)ddk->ddk_cksum.zc_word[3],
(u_longlong_t)ddk->ddk_prop);
}
if (count == 0 || dump_opt['D'] < 4)
continue;
ddt_lightweight_entry_t ddlwe;
uint64_t index = 0;
for (ddt_log_entry_t *ddle = avl_first(&ddl->ddl_tree);
ddle; ddle = AVL_NEXT(&ddl->ddl_tree, ddle)) {
DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, &ddlwe);
dump_ddt_entry(ddt, &ddlwe, index++);
}
}
}
static void
dump_ddt_object(ddt_t *ddt, ddt_type_t type, ddt_class_t class)
{
char name[DDT_NAMELEN];
ddt_lightweight_entry_t ddlwe;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: dspace=%llu; mspace=%llu; entries=%llu\n", name,
(u_longlong_t)dspace, (u_longlong_t)mspace, (u_longlong_t)count);
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &ddlwe)) == 0)
dump_ddt_entry(ddt, &ddlwe, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
static void
dump_ddt(ddt_t *ddt)
{
if (!ddt || ddt->ddt_version == DDT_VERSION_UNCONFIGURED)
return;
char flagstr[64] = {0};
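/* Build a decorated flag string such as " [FLAT LOG]"; see dump_ddt_log(). */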
if (ddt->ddt_flags > 0) {
flagstr[0] = ' ';
int c = 1;
if (ddt->ddt_flags & DDT_FLAG_FLAT)
c += strlcpy(&flagstr[c], " FLAT",
sizeof (flagstr) - c);
if (ddt->ddt_flags & DDT_FLAG_LOG)
c += strlcpy(&flagstr[c], " LOG",
sizeof (flagstr) - c);
if (ddt->ddt_flags & ~DDT_FLAG_MASK)
c += strlcpy(&flagstr[c], " UNKNOWN",
sizeof (flagstr) - c);
flagstr[1] = '[';
flagstr[c] = ']';
}
printf("DDT-%s: version=%llu [%s]; flags=0x%02llx%s; rootobj=%llu\n",
zio_checksum_table[ddt->ddt_checksum].ci_name,
(u_longlong_t)ddt->ddt_version,
(ddt->ddt_version == 0) ? "LEGACY" :
(ddt->ddt_version == 1) ? "FDT" : "UNKNOWN",
(u_longlong_t)ddt->ddt_flags, flagstr,
(u_longlong_t)ddt->ddt_dir_object);
for (ddt_type_t type = 0; type < DDT_TYPES; type++)
for (ddt_class_t class = 0; class < DDT_CLASSES; class++)
dump_ddt_object(ddt, type, class);
dump_ddt_log(ddt);
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
dump_ddt(spa->spa_ddt[c]);
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
/*
* Dump a histogram of unique class entry age
*/
if (dump_opt['D'] == 3 && getenv("ZDB_DDT_UNIQUE_AGE_HIST") != NULL) {
ddt_age_histo_t histogram;
(void) printf("DDT walk unique, building age histogram...\n");
ddt_prune_walk(spa, 0, &histogram);
/*
 * Print the histogram of birth ages for unique-class entries.
 */
if (histogram.dah_entries > 0) {
(void) printf("%5s %9s %4s\n",
"age", "blocks", "amnt");
(void) printf("%5s %9s %4s\n",
"-----", "---------", "----");
for (int i = 0; i < HIST_BINS; i++) {
(void) printf("%5d %9d %4d%%\n", 1 << i,
(int)histogram.dah_age_histo[i],
(int)((histogram.dah_age_histo[i] * 100) /
histogram.dah_entries));
}
}
}
}
static void
dump_brt(spa_t *spa)
{
if (!spa_feature_is_enabled(spa, SPA_FEATURE_BLOCK_CLONING)) {
printf("BRT: unsupported on this pool\n");
return;
}
if (!spa_feature_is_active(spa, SPA_FEATURE_BLOCK_CLONING)) {
printf("BRT: empty\n");
return;
}
char count[32], used[32], saved[32];
zdb_nicebytes(brt_get_used(spa), used, sizeof (used));
zdb_nicebytes(brt_get_saved(spa), saved, sizeof (saved));
uint64_t ratio = brt_get_ratio(spa);
printf("BRT: used %s; saved %s; ratio %llu.%02llux\n", used, saved,
(u_longlong_t)(ratio / 100), (u_longlong_t)(ratio % 100));
if (dump_opt['T'] < 2)
return;
for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
if (!brtvd->bv_initiated) {
printf("BRT: vdev %" PRIu64 ": empty\n", vdevid);
continue;
}
zdb_nicenum(brtvd->bv_totalcount, count, sizeof (count));
zdb_nicebytes(brtvd->bv_usedspace, used, sizeof (used));
zdb_nicebytes(brtvd->bv_savedspace, saved, sizeof (saved));
printf("BRT: vdev %" PRIu64 ": refcnt %s; used %s; saved %s\n",
vdevid, count, used, saved);
}
if (dump_opt['T'] < 3)
return;
/* -TTT shows per-vdev histograms; -TTTT shows all entries */
boolean_t do_histo = dump_opt['T'] == 3;
char dva[64];
if (!do_histo)
printf("\n%-16s %-10s\n", "DVA", "REFCNT");
for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
if (!brtvd->bv_initiated)
continue;
uint64_t counts[64] = {};
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
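/*
 * Walk this vdev's BRT entry ZAP: each key is a 64-bit offset into the
 * vdev and each value is the reference count for that block.
 */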
for (zap_cursor_init(&zc, spa->spa_meta_objset,
brtvd->bv_mos_entries);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
uint64_t refcnt;
VERIFY0(zap_lookup_uint64(spa->spa_meta_objset,
brtvd->bv_mos_entries,
(const uint64_t *)za->za_name, 1,
za->za_integer_length, za->za_num_integers,
&refcnt));
if (do_histo)
counts[highbit64(refcnt)]++;
else {
uint64_t offset =
*(const uint64_t *)za->za_name;
snprintf(dva, sizeof (dva), "%" PRIu64 ":%llx",
vdevid, (u_longlong_t)offset);
printf("%-16s %-10llu\n", dva,
(u_longlong_t)refcnt);
}
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
if (do_histo) {
printf("\nBRT: vdev %" PRIu64
": DVAs with 2^n refcnts:\n", vdevid);
dump_histogram(counts, 64, 0);
}
}
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
- range_tree_t *rt = vd->vdev_dtl[t];
- if (range_tree_space(rt) == 0)
+ zfs_range_tree_t *rt = vd->vdev_dtl[t];
+ if (zfs_range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
- range_tree_walk(rt, dump_dtl_seg, prefix);
+ zfs_range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
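/*
 * Read the on-disk history in SPA_OLD_MAXBLOCKSIZE chunks;
 * zpool_history_unpack() reports any trailing partial record in resid
 * so the next read restarts on a record boundary.
 */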
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
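/*
 * Convert a (level, blkid) bookmark into a byte offset within the object.
 * A block at a given indirect level covers
 * 1 << (level * (indblkshift - SPA_BLKPTRSHIFT)) level-0 blocks, and a
 * level-0 block is datablkszsec 512-byte sectors.
 */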
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
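/*
 * Append the zstd header details (compressed size, zstd version, level)
 * to blkbuf. For non-embedded blocks the block is read raw (decrypted
 * but not decompressed) so the header can be examined.
 */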
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
static abd_t *pabd = NULL;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
zdb_exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
return;
}
if (!pabd)
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
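/*
 * Print a compact form of the block pointer: its DVAs as
 * vdev:offset:asize, then lsize/psize, fill count, birth txgs and
 * checksum (or the full snprintf_blkptr() form at -bbbbbb and above).
 */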
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)BP_GET_LOGICAL_BIRTH(bp));
return;
}
blkbuf[0] = '\0';
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_LOGICAL_BIRTH(bp));
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)BP_GET_LOGICAL_BIRTH(bp),
(u_longlong_t)BP_GET_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" cksum=%016llx:%016llx:%016llx:%016llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
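/*
 * Print this block pointer; for non-hole indirect blocks, read the block
 * and recurse into each child, verifying that the children's fill counts
 * sum to the parent's.
 */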
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (BP_GET_LOGICAL_BIRTH(bp) == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (int j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (nice) >= NN_NUMBUF_SZ, "nice truncated");
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
_Static_assert(sizeof (used) >= NN_NUMBUF_SZ, "used truncated");
_Static_assert(sizeof (compressed) >= NN_NUMBUF_SZ,
"compressed truncated");
_Static_assert(sizeof (uncompressed) >= NN_NUMBUF_SZ,
"uncompressed truncated");
_Static_assert(sizeof (unique) >= NN_NUMBUF_SZ, "unique truncated");
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
if (BP_GET_LOGICAL_BIRTH(bp) != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
ASSERT(BP_GET_LOGICAL_BIRTH(bp) != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t *attrp;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
attrp = zap_attribute_alloc();
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, attrp) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
int len;
dmu_objset_name(os, osname);
len = snprintf(buf, sizeof (buf), "%s#%s", osname,
attrp->za_name);
VERIFY3S(len, <, ZFS_MAX_DATASET_NAME_LEN);
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
zap_attribute_free(attrp);
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
static void
dump_blkptr_list(dsl_deadlist_t *dl, const char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
_Static_assert(sizeof (entries) >= NN_NUMBUF_SZ, "entries truncated");
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) putchar('\n');
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
 * It's possible that the dataset's uncomp space is larger than the
 * livelist's because livelists do not track embedded block pointers.
 */
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static char *key_material = NULL;
static boolean_t
zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
{
uint64_t keyformat, salt, iters;
int i;
unsigned char c;
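/*
 * The wrapping key is either supplied directly as hex digits or derived
 * from a passphrase with PBKDF2-HMAC-SHA1, using the salt and iteration
 * count stored in the dataset's crypto object.
 */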
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), sizeof (uint64_t),
1, &keyformat));
switch (keyformat) {
case ZFS_KEYFORMAT_HEX:
for (i = 0; i < WRAPPING_KEY_LEN * 2; i += 2) {
if (!isxdigit(key_material[i]) ||
!isxdigit(key_material[i+1]))
return (B_FALSE);
if (sscanf(&key_material[i], "%02hhx", &c) != 1)
return (B_FALSE);
key_out[i / 2] = c;
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
sizeof (uint64_t), 1, &salt));
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
sizeof (uint64_t), 1, &iters));
if (PKCS5_PBKDF2_HMAC_SHA1(key_material, strlen(key_material),
((uint8_t *)&salt), sizeof (uint64_t), iters,
WRAPPING_KEY_LEN, key_out) != 1)
return (B_FALSE);
break;
default:
fatal("no support for key format %u\n",
(unsigned int) keyformat);
}
return (B_TRUE);
}
static char encroot[ZFS_MAX_DATASET_NAME_LEN];
static boolean_t key_loaded = B_FALSE;
static void
zdb_load_key(objset_t *os)
{
dsl_pool_t *dp;
dsl_dir_t *dd, *rdd;
uint8_t key[WRAPPING_KEY_LEN];
uint64_t rddobj;
int err;
dp = spa_get_dsl(os->os_spa);
dd = os->os_dsl_dataset->ds_dir;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, sizeof (uint64_t), 1, &rddobj));
VERIFY0(dsl_dir_hold_obj(dd->dd_pool, rddobj, NULL, FTAG, &rdd));
dsl_dir_name(rdd, encroot);
dsl_dir_rele(rdd, FTAG);
if (!zdb_derive_key(dd, key))
fatal("couldn't derive encryption key");
dsl_pool_config_exit(dp, FTAG);
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_UNAVAILABLE);
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args;
crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)key, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
NULL, crypto_args, &dcp));
err = spa_keystore_load_wkey(encroot, dcp, B_FALSE);
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err != 0)
fatal(
"couldn't load encryption key for %s: %s",
encroot, err == ZFS_ERR_CRYPTO_NOTSUP ?
"crypto params not supported" : strerror(err));
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_AVAILABLE);
printf("Unlocked encryption root: %s\n", encroot);
key_loaded = B_TRUE;
}
static void
zdb_unload_key(void)
{
if (!key_loaded)
return;
VERIFY0(spa_keystore_unload_wkey(encroot));
key_loaded = B_FALSE;
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, const void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
if (dump_opt['K']) {
/* decryption requested, try to load keys */
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset "
"'%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
/* succeeds or dies */
zdb_load_key(*osp);
/* release it all */
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
}
int ds_hold_flags = key_loaded ? DS_HOLD_FLAG_DECRYPT : 0;
err = dmu_objset_hold_flags(path, ds_hold_flags, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS &&
(key_loaded || !(*osp)->os_encrypted)) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele_flags(dmu_objset_ds(*osp),
ds_hold_flags, tag);
*osp = NULL;
}
}
sa_os = *osp;
return (err);
}
static void
close_objset(objset_t *os, const void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele_flags(dmu_objset_ds(os),
key_loaded ? DS_HOLD_FLAG_DECRYPT : 0, tag);
sa_attr_table = NULL;
sa_os = NULL;
zdb_unload_key();
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
 * Clean up DDT internal state. ddt_lookup() adds entries to ddt_tree, which on
 * a live pool are normally cleaned up during ddt_sync(). We can't do that (and
 * wouldn't want to anyway), but if we don't clean up, the presence of stuff on
 * ddt_tree will trip asserts in ddt_table_free(). So, we clean up ourselves.
 *
 * Note that this is not a particularly efficient way to do this, but
 * ddt_remove() is the only public method that can do the work we need, and it
 * requires the right locks etc. to do the job. This is only ever called
 * during zdb shutdown, so efficiency is not especially important.
 */
static void
zdb_ddt_cleanup(spa_t *spa)
{
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
if (!ddt)
continue;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
ddt_enter(ddt);
ddt_entry_t *dde = avl_first(&ddt->ddt_tree), *next;
while (dde) {
next = AVL_NEXT(&ddt->ddt_tree, dde);
dde->dde_io = NULL;
ddt_remove(ddt, dde);
dde = next;
}
ddt_exit(ddt);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
}
static void
zdb_exit(int reason)
{
if (spa != NULL)
zdb_ddt_cleanup(spa);
if (os != NULL) {
close_objset(os, FTAG);
} else if (spa != NULL) {
spa_close(spa, FTAG);
}
fuid_table_destroy();
if (kernel_init_done)
kernel_fini();
exit(reason);
}
/*
 * Print uid or gid information.
 * For a normal POSIX id, just the id is printed in decimal format.
 * For CIFS files with a FUID, the fuid is printed in hex followed by
 * the domain-rid string.
 */
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
const char *domain =
zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
boolean_t can_print = !dump_opt['P'];
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (!isprint(value[idx])) {
can_print = B_FALSE;
break;
}
}
for (idx = 0; idx < cnt; ++idx) {
if (can_print)
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_symlink_size >= sizeof (linktarget)) {
(void) printf("symlink size %d is too large\n",
sa_symlink_size);
return;
}
linktarget[sa_symlink_size] = '\0';
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
_Static_assert(sizeof (iblk) >= NN_NUMBUF_SZ, "iblk truncated");
_Static_assert(sizeof (dblk) >= NN_NUMBUF_SZ, "dblk truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ, "lsize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ, "asize truncated");
_Static_assert(sizeof (bonus_size) >= NN_NUMBUF_SZ,
"bonus_size truncated");
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (!key_loaded && os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
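/*
 * %full is the fraction of the object's maximum offset that is actually
 * filled; for the meta-dnode (object 0) the fill count is in dnodes, so
 * scale it down by DNODES_PER_BLOCK.
 */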
(void) snprintf(fill, sizeof (fill), "%6.2f", 100.0 *
doi.doi_fill_count * doi.doi_data_block_size / (object == 0 ?
DNODES_PER_BLOCK : 1) / doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (key_loaded ||
(!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type))) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5) {
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
DN_SPILL_BLKPTR(dn->dn_phys), B_FALSE);
(void) printf("\nSpill block: %s\n", blkbuf);
}
dump_indirect(dn);
}
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (segsize) >= NN_NUMBUF_SZ,
"segsize truncated");
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *const objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
*/
static int
parse_object_range(char *range, zopt_object_range_t *zor, const char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = ZDB_MAP_OBJECT_ID(zor->zor_obj_end);
out:
free(dup);
return (rc);
}
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
_Static_assert(sizeof (numbuf) >= NN_NUMBUF_SZ, "numbuf truncated");
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
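/*
 * Dump any explicitly requested object ranges. Backing 'object' up by
 * one lets dmu_object_next() pick up obj_start itself if it is allocated.
 */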
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, ctime(&timestamp));
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\tbp = %s\n", blkbuf);
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("\traidz_reflow state=%u off=%llu\n",
(int)RRSS_GET_STATE(ub),
(u_longlong_t)RRSS_GET_OFFSET(ub));
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
zdb_exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
zdb_exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
zdb_exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
zdb_exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
zdb_exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
const char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
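/*
 * At this point zns_boolean is still empty, so "noise" is the XDR-encoded
 * size of an empty nvlist.  It is subtracted from each per-type size below
 * so that only the bytes contributed by the pairs themselves are attributed
 * to that type; whatever is left over is reported as nvlist overhead.
 */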
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference = 0;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(const char *prefix, const cksum_record_t *rec)
{
fputs(prefix, stdout);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
putchar('\n');
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
uint64_t label_offset;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
boolean_t cksum_valid;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d %s\n", l,
label->cksum_valid ? "" : "(Bad label cksum)");
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(const l2arc_log_blkptr_t *lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps->lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps->lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps->lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE(lbps->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE(lbps->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS(lbps->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM(lbps->lbp_prop));
(void) printf("|\n\n");
}
static void
dump_l2arc_log_blocks(int fd, const l2arc_dev_hdr_phys_t *l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr->dh_evict;
dev.l2ad_start = l2dhdr->dh_start;
dev.l2ad_end = l2dhdr->dh_end;
if (l2dhdr->dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(&lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default: {
abd_t *abd = abd_alloc_linear(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
abd_t dabd;
abd_get_from_buf_struct(&dabd, &this_lb,
sizeof (this_lb));
int err = zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &dabd,
asize, sizeof (this_lb), NULL);
abd_free(&dabd);
abd_free(abd);
if (err != 0) {
(void) printf("L2ARC block decompression "
"failed\n");
goto out;
}
break;
}
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(&lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr->dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
out:
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr = {0}, rebuild = {0};
int error = B_FALSE;
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, &l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the header of the device may be less than what zdb
* reports by dump_l2arc_log_blocks() which emulates l2arc_rebuild().
* This happens because dump_l2arc_log_blocks() lacks the memory
* pressure valve that l2arc_rebuild() has. Thus, if we are on a system
* with low memory, l2arc_rebuild will exit prematurely and dh_lb_asize
* and dh_lb_count will be lower to begin with than what exists on the
* device. This is normal and zdb should not exit with an error. The
* opposite case should never happen though, the values reported in the
* header should never be higher than what dump_l2arc_log_blocks() and
* l2arc_rebuild() report. If this happens there is a leak in the
* accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing
* current one's obj and remaining path until we find the obj
* for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
zfs_fallthrough;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
static int
dump_backup_bytes(objset_t *os, void *buf, int len, void *arg)
{
const char *p = (const char *)buf;
ssize_t nwritten;
(void) os;
(void) arg;
/* Write the data out, handling short writes and signals. */
while ((nwritten = write(STDOUT_FILENO, p, len)) < len) {
if (nwritten < 0) {
if (errno == EINTR)
continue;
return (errno);
}
p += nwritten;
len -= nwritten;
}
return (0);
}
static void
dump_backup(const char *pool, uint64_t objset_id, const char *flagstr)
{
boolean_t embed = B_FALSE;
boolean_t large_block = B_FALSE;
boolean_t compress = B_FALSE;
boolean_t raw = B_FALSE;
const char *c;
for (c = flagstr; c != NULL && *c != '\0'; c++) {
switch (*c) {
case 'e':
embed = B_TRUE;
break;
case 'L':
large_block = B_TRUE;
break;
case 'c':
compress = B_TRUE;
break;
case 'w':
raw = B_TRUE;
break;
default:
fprintf(stderr, "dump_backup: invalid flag "
"'%c'\n", *c);
return;
}
}
if (isatty(STDOUT_FILENO)) {
fprintf(stderr, "dump_backup: stream cannot be written "
"to a terminal\n");
return;
}
offset_t off = 0;
dmu_send_outparams_t out = {
.dso_outfunc = dump_backup_bytes,
.dso_dryrun = B_FALSE,
};
int err = dmu_send_obj(pool, objset_id, /* fromsnap */0, embed,
large_block, compress, raw, /* saved */ B_FALSE, STDOUT_FILENO,
&off, &out);
if (err != 0) {
fprintf(stderr, "dump_backup: dmu_send_obj: %s\n",
strerror(err));
return;
}
}
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1)
return (errno);
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
(void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
(void) close(fd);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
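/*
 * Verify the embedded checksum of a vdev label's vl_vdev_phys region.  The
 * stored checksum was computed with a "verifier" (built from the on-disk
 * offset of the region) substituted for the checksum field, so we place the
 * same verifier there, recompute with the label checksum function, and
 * compare against the saved value, honoring byteswapped labels.
 */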
static boolean_t
label_cksum_valid(vdev_label_t *label, uint64_t offset)
{
zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
zio_cksum_t expected_cksum;
zio_cksum_t actual_cksum;
zio_cksum_t verifier;
zio_eck_t *eck;
int byteswap;
void *data = (char *)label + offsetof(vdev_label_t, vl_vdev_phys);
eck = (zio_eck_t *)((char *)(data) + VDEV_PHYS_SIZE) - 1;
offset += offsetof(vdev_label_t, vl_vdev_phys);
ZIO_SET_CHECKSUM(&verifier, offset, 0, 0, 0);
byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck->zec_cksum;
eck->zec_cksum = verifier;
abd_t *abd = abd_get_from_buf(data, VDEV_PHYS_SIZE);
ci->ci_func[byteswap](abd, VDEV_PHYS_SIZE, NULL, &actual_cksum);
abd_free(abd);
if (byteswap)
byteswap_uint64_array(&expected_cksum, sizeof (zio_cksum_t));
if (ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (B_TRUE);
return (B_FALSE);
}
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS] = {{{{0}}}};
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
zdb_exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
zdb_exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN_TYPED(psize, sizeof (vdev_label_t), uint64_t);
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Verify label cksum
* 3. Unpack the configuration and insert in config tree.
* 4. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
label->label_offset = vdev_label_offset(psize, l, 0);
if (pread64(fd, &label->label, sizeof (label->label),
label->label_offset) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
label->cksum_valid = label_cksum_valid(&label->label,
label->label_offset);
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device read the header. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if existent.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
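/*
 * Exit status: 2 if no label contained an intact config, 1 if a config was
 * found but some label failed to read or unpack (or the L2ARC header
 * consistency check failed), 0 otherwise.
 */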
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
static int
dump_one_objset(const char *dsname, void *arg)
{
(void) arg;
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0) {
global_feature_count[
SPA_FEATURE_REDACTION_BOOKMARKS]++;
objset_t *mos = os->os_spa->spa_meta_objset;
dnode_t *rl;
VERIFY0(dnode_hold(mos,
dbn->dbn_phys.zbm_redaction_obj, FTAG, &rl));
if (rl->dn_have_spill) {
global_feature_count[
SPA_FEATURE_REDACTION_LIST_SPILL]++;
}
}
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
typedef struct zdb_brt_entry {
dva_t zbre_dva;
uint64_t zbre_refcount;
avl_node_t zbre_node;
} zdb_brt_entry_t;
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_clone_asize;
uint64_t zcb_clone_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
avl_tree_t zcb_brt;
boolean_t zcb_brt_is_active;
} zdb_cb_t;
/* test if two DVA offsets from same vdev are within the same metaslab */
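/*
 * For example, with vdev_ms_shift == 29 (512 MiB metaslabs), offsets
 * 0x10000000 and 0x1fffffff both map to metaslab 0, while 0x20000000 maps
 * to metaslab 1, so only the first pair counts as being in the same
 * metaslab.
 */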
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
const char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine creates fixed-column output of three histograms showing,
* for each block size from 512 up to 2^SPA_MAX_FOR_16M, the count,
* length and cumulative length of the psize, lsize and asize blocks.
*
* All three types of blocks are listed on a single line
*
* By default the table is printed in nicenum format (e.g. 123K), but if
* the '-P' option is specified the full raw (parseable) number is
* printed instead.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer used to convert a number into a string with
* zdb_nicenum(), so that either raw or human-readable numbers can
* be output.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Setup the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
/*
* This flag controls if we will issue a claim for the block while
* counting it, to ensure that all blocks are referenced in space maps.
* We don't issue claims if we're not doing leak tracking, because it's
* expensive if the user isn't interested. We also don't claim the
* second or later occurrences of cloned or dedup'd blocks, because we
* already claimed them the first time.
*/
boolean_t do_claim = !dump_opt['L'];
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
blkptr_t tempbp;
if (BP_GET_DEDUP(bp)) {
/*
* Dedup'd blocks are special. We need to count them, so we can
* later uncount them when reporting leaked space, and we must
* only claim them once.
*
* We use the existing dedup system to track what we've seen.
* The first time we see a block, we do a ddt_lookup() to see
* if it exists in the DDT. If we're doing leak tracking, we
* claim the block at this time.
*
* Each time we see a block, we reduce the refcount in the
* entry by one, and add to the size and count of dedup'd
* blocks to report at the end.
*/
ddt_t *ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
/*
* Find the block. This will create the entry in memory, but
* we'll know if that happened by its refcount.
*/
ddt_entry_t *dde = ddt_lookup(ddt, bp);
/*
* ddt_lookup() can return NULL if this block didn't exist
* in the DDT and creating it would take the DDT over its
* quota. Since we got the block from disk, it must exist in
* the DDT, so this can't happen. However, when unique entries
* are pruned, the dedup bit can be set with no corresponding
* entry in the DDT.
*/
if (dde == NULL) {
ddt_exit(ddt);
goto skipped;
}
/* Get the phys for this variant */
ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
/*
* This entry may have multiple sets of DVAs. We must claim
* each set the first time we see them in a real block on disk,
* or count them on subsequent occurrences. We don't have a
* convenient way to track the first time we see each variant,
* so we repurpose dde_io as a set of "seen" flag bits. We can
* do this safely in zdb because it never writes, so it will
* never have a writing zio for this block in that pointer.
*/
boolean_t seen = !!(((uintptr_t)dde->dde_io) & (1 << v));
if (!seen)
dde->dde_io =
(void *)(((uintptr_t)dde->dde_io) | (1 << v));
/* Consume a reference for this block. */
if (ddt_phys_total_refcnt(ddt, dde->dde_phys) > 0)
ddt_phys_decref(dde->dde_phys, v);
/*
* If this entry has a single flat phys, it may have been
* extended with additional DVAs at some time in its life.
* This block might be from before it was fully extended, and
* so have fewer DVAs.
*
* If this is the first time we've seen this block, and we
* claimed it as-is, then we would miss the claim on some
* number of DVAs, which would then be seen as leaked.
*
* In all cases, if we've had fewer DVAs, then the asize would
* be too small, and would lead to the pool apparently using
* more space than allocated.
*
* To handle this, we copy the canonical set of DVAs from the
* entry back to the block pointer before we claim it.
*/
if (v == DDT_PHYS_FLAT) {
ASSERT3U(BP_GET_BIRTH(bp), ==,
ddt_phys_birth(dde->dde_phys, v));
tempbp = *bp;
ddt_bp_fill(dde->dde_phys, v, &tempbp,
BP_GET_BIRTH(bp));
bp = &tempbp;
}
if (seen) {
/*
* The second or later time we see this block,
* it's a duplicate and we count it.
*/
zcb->zcb_dedup_asize += BP_GET_ASIZE(bp);
zcb->zcb_dedup_blocks++;
/* Already claimed, don't do it again. */
do_claim = B_FALSE;
}
ddt_exit(ddt);
} else if (zcb->zcb_brt_is_active &&
brt_maybe_exists(zcb->zcb_spa, bp)) {
/*
* Cloned blocks are special. We need to count them, so we can
* later uncount them when reporting leaked space, and we must
* only claim them once.
*
* To do this, we keep our own in-memory BRT. For each block
* we haven't seen before, we look it up in the real BRT and
* if it's there, we note it and its refcount, then proceed as
* normal. If we see the block again, we count it as a clone
* and then give it no further consideration.
*/
zdb_brt_entry_t zbre_search, *zbre;
avl_index_t where;
zbre_search.zbre_dva = bp->blk_dva[0];
zbre = avl_find(&zcb->zcb_brt, &zbre_search, &where);
if (zbre == NULL) {
/* Not seen before; track it */
uint64_t refcnt =
brt_entry_get_refcount(zcb->zcb_spa, bp);
if (refcnt > 0) {
zbre = umem_zalloc(sizeof (zdb_brt_entry_t),
UMEM_NOFAIL);
zbre->zbre_dva = bp->blk_dva[0];
zbre->zbre_refcount = refcnt;
avl_insert(&zcb->zcb_brt, zbre, where);
}
} else {
/*
* Second or later occurrence, count it and take a
* refcount.
*/
zcb->zcb_clone_asize += BP_GET_ASIZE(bp);
zcb->zcb_clone_blocks++;
zbre->zbre_refcount--;
if (zbre->zbre_refcount == 0) {
avl_remove(&zcb->zcb_brt, zbre);
umem_free(zbre, sizeof (zdb_brt_entry_t));
}
/* Already claimed, don't do it again. */
do_claim = B_FALSE;
}
}
skipped:
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
if (!do_claim)
return;
VERIFY0(zio_wait(zio_claim(NULL, zcb->zcb_spa,
spa_min_claim_txg(zcb->zcb_spa), bp, NULL, NULL,
ZIO_FLAG_CANFAIL)));
}
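/*
 * Illustrative sketch (an editor's addition, not part of the original
 * source): the power-of-two size binning used by zdb_count_block() above is
 * simply floor(log2(size)), i.e. highbit64(size) - 1.  512 lands in bin 9,
 * 4096 in bin 12, 131072 in bin 17, and a non-power-of-two size such as
 * 5000 falls into the bin of the largest power of two below it (bin 12).
 */
static int
example_size_bin(uint64_t size)
{
	int bin = -1;

	while (size != 0) {	/* equivalent to highbit64(size) - 1 */
		size >>= 1;
		bin++;
	}
	return (bin);
}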
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && BP_GET_LOGICAL_BIRTH(bp) > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
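/*
 * Progress reporting below: gethrtime() is in nanoseconds, so
 * (now - zcb_start) / 1000 / 1000 is the elapsed time in milliseconds, and
 * bytes divided by that is roughly KiB per second; the "+ 1" terms only
 * guard against division by zero early on.  The remaining time is then
 * the unvisited asize at that rate.
 */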
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
uint64_t kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
uint64_t sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
_Static_assert(sizeof (buf) >= NN_NUMBUF_SZ, "buf truncated");
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4"PRIu64"MB/s) "
"estimated time remaining: "
"%"PRIu64"hr %02"PRIu64"min %02"PRIu64"sec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
- range_tree_add(svr->svr_allocd_segs, offset, size);
+ zfs_range_tree_add(svr->svr_allocd_segs, offset, size);
else
- range_tree_remove(svr->svr_allocd_segs, offset, size);
+ zfs_range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset, (void) arg;
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
- ASSERT0(range_tree_space(svr->svr_allocd_segs));
+ ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
- range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ zfs_range_tree_t *allocs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
- ASSERT0(range_tree_space(allocs));
+ ASSERT0(zfs_range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
- range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
+ zfs_range_tree_vacate(allocs, zfs_range_tree_add,
+ svr->svr_allocd_segs);
}
- range_tree_destroy(allocs);
+ zfs_range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
- range_tree_clear(svr->svr_allocd_segs,
+ zfs_range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
- zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
- range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
+ zcb->zcb_removing_size += zfs_range_tree_space(svr->svr_allocd_segs);
+ zfs_range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab that it belongs to.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
- range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
+ zfs_range_tree_remove(ms->ms_allocatable, sme->sme_offset,
+ sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference of the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
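/*
 * Exclude the checkpointed space of every top-level vdev from the
 * leak-detection trees.
 */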
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
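/*
 * Log space map callback used by get_unflushed_alloc_space(): tally the
 * allocations and frees that have not yet been flushed into their
 * metaslab's ms_sm.
 */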
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
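/*
 * Apply a single log space map entry to its metaslab's ms_allocatable
 * tree, adding or removing the range depending on whether the entry's
 * type matches the maptype being loaded. Entries already flushed into
 * the metaslab's ms_sm are skipped.
 */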
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
- range_tree_add(ms->ms_allocatable, offset, size);
+ zfs_range_tree_add(ms->ms_allocatable, offset, size);
else
- range_tree_remove(ms->ms_allocatable, offset, size);
+ zfs_range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
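/*
 * Populate the ms_allocatable tree of every metaslab on every concrete
 * vdev from its ms_sm space map using the requested maptype, then fold
 * in any entries still sitting in the log space maps.
 */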
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
- range_tree_vacate(msp->ms_allocatable, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries that has the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
- range_tree_vacate(msp->ms_allocatable, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
- range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
+ zfs_range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable's
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
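/*
 * Prepare the pool for leak detection: take over the metaslab class ops,
 * overload the ms_allocatable trees with allocated (rather than free)
 * segments, and exclude any space held by the pool checkpoint.
 */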
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* On load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
}
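/*
 * Compare the obsolete counts accumulated during traversal against each
 * entry of an indirect vdev's mapping, report any mismatches, and return
 * whether the mapping appears to leak space.
 */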
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient but it's easy to
* verify correctness.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1ULL << vd->vdev_ashift) {
- if (range_tree_contains(msp->ms_allocatable,
+ if (zfs_range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
obsolete_bytes += 1ULL << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping"
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
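/*
 * Finish leak detection: any segment still present in a concrete vdev's
 * ms_allocatable tree was never claimed during traversal and is reported
 * as leaked via zdb_leak().
 */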
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
- range_tree_vacate(msp->ms_allocatable,
+ zfs_range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
- range_tree_vacate(msp->ms_allocatable,
+ zfs_range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
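/*
 * Generic block-counting callback: record the block as ZDB_OT_DEFERRED
 * in the traversal stats and, at high verbosity, print its block pointer.
 */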
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t *attrp = zap_attribute_alloc();
dsl_deadlist_t ll;
/* NULL out os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, attrp) == 0;
(void) zap_cursor_advance(&zc)) {
VERIFY0(dsl_deadlist_open(&ll, mos, attrp->za_first_integer));
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
zap_attribute_free(attrp);
}
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
static int
zdb_brt_entry_compare(const void *zcn1, const void *zcn2)
{
const dva_t *dva1 = &((const zdb_brt_entry_t *)zcn1)->zbre_dva;
const dva_t *dva2 = &((const zdb_brt_entry_t *)zcn2)->zbre_dva;
int cmp;
cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (cmp == 0)
cmp = TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2));
return (cmp);
}
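/*
 * Traverse every block in the pool, accumulating per-type statistics and,
 * unless -L was given, cross-checking the totals against the space maps
 * to detect leaked or double-allocated space.
 */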
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t *zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
ddt_prefetch_all(spa);
zcb = umem_zalloc(sizeof (zdb_cb_t), UMEM_NOFAIL);
if (spa_feature_is_active(spa, SPA_FEATURE_BLOCK_CLONING)) {
avl_create(&zcb->zcb_brt, zdb_brt_entry_compare,
sizeof (zdb_brt_entry_t),
offsetof(zdb_brt_entry_t, zbre_node));
zcb->zcb_brt_is_active = B_TRUE;
}
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
zdb_leak_init(spa, zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
bpobj_count_block_cb, zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
bpobj_count_block_cb, zcb, NULL);
}
zdb_claim_removing(spa, zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
zcb, NULL));
}
deleted_livelists_count_blocks(spa, zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
zcb->zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb->zcb_start = zcb->zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb->zcb_haderrors |= err;
if (zcb->zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb->zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb->zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
leaks |= zdb_leak_fini(spa, zcb);
tzb = &zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
total_found =
tzb->zb_asize - zcb->zcb_dedup_asize - zcb->zcb_clone_asize +
zcb->zcb_removing_size + zcb->zcb_checkpoint_size;
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
}
if (tzb->zb_count == 0) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
"bp deduped:", (u_longlong_t)zcb->zcb_dedup_asize,
(u_longlong_t)zcb->zcb_dedup_blocks,
(double)zcb->zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\t%-16s %14llu count: %6llu\n",
"bp cloned:", (u_longlong_t)zcb->zcb_clone_asize,
(u_longlong_t)zcb->zcb_clone_blocks);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb->zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb->zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb->zcb_embedded_histogram[i],
sizeof (zcb->zcb_embedded_histogram[i]) /
sizeof (zcb->zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_num_entries(vim),
mem, vdev_indirect_mapping_size(vim));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
zfs_blkstat_t *mdstats = umem_zalloc(sizeof (zfs_blkstat_t),
UMEM_NOFAIL);
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
const char *typename;
/* make sure nicenum has enough space */
_Static_assert(sizeof (csize) >= NN_NUMBUF_SZ,
"csize truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ,
"lsize truncated");
_Static_assert(sizeof (psize) >= NN_NUMBUF_SZ,
"psize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ,
"asize truncated");
_Static_assert(sizeof (avg) >= NN_NUMBUF_SZ,
"avg truncated");
_Static_assert(sizeof (gang) >= NN_NUMBUF_SZ,
"gang truncated");
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb->zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb->zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (level != ZB_TOTAL && t < DMU_OT_NUMTYPES &&
(level > 0 || DMU_OT_IS_METADATA(t))) {
mdstats->zb_count += zb->zb_count;
mdstats->zb_lsize += zb->zb_lsize;
mdstats->zb_psize += zb->zb_psize;
mdstats->zb_asize += zb->zb_asize;
mdstats->zb_gangs += zb->zb_gangs;
}
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb->zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
zdb_nicenum(mdstats->zb_count, csize,
sizeof (csize));
zdb_nicenum(mdstats->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(mdstats->zb_psize, psize,
sizeof (psize));
zdb_nicenum(mdstats->zb_asize, asize,
sizeof (asize));
zdb_nicenum(mdstats->zb_asize / mdstats->zb_count, avg,
sizeof (avg));
zdb_nicenum(mdstats->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)mdstats->zb_lsize / mdstats->zb_psize,
100.0 * mdstats->zb_asize / tzb->zb_asize);
(void) printf("%s\n", "Metadata Total");
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
dump_size_histograms(zcb);
}
umem_free(mdstats, sizeof (zfs_blkstat_t));
}
(void) printf("\n");
if (leaks) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
if (zcb->zcb_haderrors) {
umem_free(zcb, sizeof (zdb_cb_t));
return (3);
}
umem_free(zcb, sizeof (zdb_cb_t));
return (0);
}
typedef struct zdb_ddt_entry {
/* key must be first for ddt_key_compare */
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
(void) zilog, (void) dnp;
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
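/*
 * Walk the pool as if every eligible block were deduplicated and print
 * the DDT histogram and dedup ratio that would result.
 */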
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
avl_create(&t, ddt_key_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
ddt_stat_t *dds = &ddh_total.ddh_stat[highbit64(refcnt) - 1];
dds->dds_blocks += zdde->zdde_ref_blocks / refcnt;
dds->dds_lsize += zdde->zdde_ref_lsize / refcnt;
dds->dds_psize += zdde->zdde_ref_psize / refcnt;
dds->dds_dsize += zdde->zdde_ref_dsize / refcnt;
dds->dds_ref_blocks += zdde->zdde_ref_blocks;
dds->dds_ref_lsize += zdde->zdde_ref_lsize;
dds->dds_ref_psize += zdde->zdde_ref_psize;
dds->dds_ref_dsize += zdde->zdde_ref_dsize;
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_total(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
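/*
 * Cross-check the device_removal and obsolete_counts feature refcounts
 * against the indirect vdevs and obsolete-count objects actually present
 * in the pool.
 */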
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter; otherwise it attempts to infer the config from
* the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
*/
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1) {
if (target != poolname)
free(poolname);
return (NULL);
}
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
free(bogus_name);
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool, therefore
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
- range_tree_verify_not_present(ms->ms_allocatable,
+ zfs_range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in current's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
- range_tree_walk(ckpoint_msp->ms_allocatable,
- (range_tree_func_t *)range_tree_verify_not_present,
+ zfs_range_tree_walk(ckpoint_msp->ms_allocatable,
+ (zfs_range_tree_func_t *)
+ zfs_range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can do verification on it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
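/*
 * Verify the pool checkpoint: look up the checkpointed uberblock in the
 * MOS and, unless -L was given, cross-check the checkpointed state
 * against the current state of the pool.
 */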
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
(void) arg;
for (uint64_t i = start; i < size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
- range_tree_add(mos_refd_objs, obj, 1);
+ zfs_range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
- !range_tree_contains(mos_refd_objs, obj, 1))
- range_tree_add(mos_refd_objs, obj, 1);
+ !zfs_range_tree_contains(mos_refd_objs, obj, 1))
+ zfs_range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
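/*
 * Mark every MOS object referenced from this vdev (DTL, metaslab array,
 * indirect mapping and births, per-vdev ZAPs, checkpoint and obsolete
 * space maps) and recurse into its children.
 */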
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_root_zap != 0)
mos_obj_refd(vd->vdev_root_zap);
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
static void
errorlog_count_refd(objset_t *mos, uint64_t errlog)
{
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
for (zap_cursor_init(&zc, mos, errlog);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
mos_obj_refd(za->za_first_integer);
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
}
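/*
 * Mark every MOS object the pool is known to reference, then walk all
 * allocated MOS objects and report any that were never referenced
 * (leaked) as well as any referenced objects that were never allocated.
 */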
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
errorlog_count_refd(mos, spa->spa_errlog_last);
errorlog_count_refd(mos, spa->spa_errlog_scrub);
}
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
if (!ddt || ddt->ddt_version == DDT_VERSION_UNCONFIGURED)
continue;
/* DDT store objects */
for (ddt_type_t type = 0; type < DDT_TYPES; type++) {
for (ddt_class_t class = 0; class < DDT_CLASSES;
class++) {
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
/* FDT container */
if (ddt->ddt_version == DDT_VERSION_FDT)
mos_obj_refd(ddt->ddt_dir_object);
/* FDT log objects */
if (ddt->ddt_flags & DDT_FLAG_LOG) {
mos_obj_refd(ddt->ddt_log[0].ddl_object);
mos_obj_refd(ddt->ddt_log[1].ddl_object);
}
}
for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
if (brtvd->bv_initiated) {
mos_obj_refd(brtvd->bv_mos_brtvdev);
mos_obj_refd(brtvd->bv_mos_entries);
}
}
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
- if (range_tree_contains(mos_refd_objs, object, 1)) {
- range_tree_remove(mos_refd_objs, object, 1);
+ if (zfs_range_tree_contains(mos_refd_objs, object, 1)) {
+ zfs_range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
VERIFY0(dmu_object_info(mos, object, &doi));
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
- (void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
- if (!range_tree_is_empty(mos_refd_objs))
+ (void) zfs_range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
+ if (!zfs_range_tree_is_empty(mos_refd_objs))
rv = 2;
- range_tree_vacate(mos_refd_objs, NULL, NULL);
- range_tree_destroy(mos_refd_objs);
+ zfs_range_tree_vacate(mos_refd_objs, NULL, NULL);
+ zfs_range_tree_destroy(mos_refd_objs);
return (rv);
}
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed log - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos = {0};
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['T'])
dump_brt(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa, dump_opt['M'] > 1);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
- mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
- 0);
+ mos_refd_objs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_REDACTION_LIST_SPILL] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
zdb_exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _ZFS_LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
static boolean_t
try_decompress_block(abd_t *pabd, uint64_t lsize, uint64_t psize,
int flags, int cfunc, void *lbuf, void *lbuf2)
{
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[cfunc].ci_name);
}
/*
* We set lbuf to all zeros and lbuf2 to all
* ones, then decompress to both buffers and
* compare their contents. This way we can
* know if decompression filled exactly to
* lsize or if it left some bytes unwritten.
*/
memset(lbuf, 0x00, lsize);
memset(lbuf2, 0xff, lsize);
abd_t labd, labd2;
abd_get_from_buf_struct(&labd, lbuf, lsize);
abd_get_from_buf_struct(&labd2, lbuf2, lsize);
boolean_t ret = B_FALSE;
if (zio_decompress_data(cfunc, pabd,
&labd, psize, lsize, NULL) == 0 &&
zio_decompress_data(cfunc, pabd,
&labd2, psize, lsize, NULL) == 0 &&
memcmp(lbuf, lbuf2, lsize) == 0)
ret = B_TRUE;
abd_free(&labd2);
abd_free(&labd);
return (ret);
}
static uint64_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
(void) buf;
uint64_t orig_lsize = lsize;
boolean_t tryzle = ((getenv("ZDB_NO_ZLE") == NULL));
boolean_t found = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
ZIO_COMPRESS_MASK(ZLE);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
/*
* Every gzip level has the same decompressor, no need to
* run it 9 times per bruteforce attempt.
*/
mask |= ZIO_COMPRESS_MASK(GZIP_2) | ZIO_COMPRESS_MASK(GZIP_3);
mask |= ZIO_COMPRESS_MASK(GZIP_4) | ZIO_COMPRESS_MASK(GZIP_5);
mask |= ZIO_COMPRESS_MASK(GZIP_6) | ZIO_COMPRESS_MASK(GZIP_7);
mask |= ZIO_COMPRESS_MASK(GZIP_8) | ZIO_COMPRESS_MASK(GZIP_9);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. The user can pass the 'v' flag
* to see progress as it goes.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (try_decompress_block(pabd, lsize, psize, flags,
*cfuncp, lbuf, lbuf2)) {
found = B_TRUE;
break;
}
}
if (*cfuncp != 0)
break;
}
if (!found && tryzle) {
for (lsize = orig_lsize; lsize <= maxlsize;
lsize += SPA_MINBLOCKSIZE) {
if (try_decompress_block(pabd, lsize, psize, flags,
ZIO_COMPRESS_ZLE, lbuf, lbuf2)) {
*cfuncp = ZIO_COMPRESS_ZLE;
found = B_TRUE;
break;
}
}
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (lsize > maxlsize ? -1 : lsize);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
*/
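/*
* Illustrative invocation (reader's addition; the pool name, vdev
* specifier, offset and size below are made-up placeholders, not taken
* from this change). To read 0x200 bytes at offset 0x400000 from the
* first child of the first top-level vdev and decompress them before
* dumping, one could run something like:
*
* zdb -R tank:0.0:400000:200:d
*
* following the pool:vdev_specifier:offset:[lsize/]psize[:flags] form
* described above.
*/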
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *flagstr, *sizes, *tmp = NULL;
const char *vdev, *errmsg = NULL;
int i, len, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ?: "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ?: "");
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
errmsg = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
errmsg = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
errmsg = "offset must be a multiple of sector size";
if (errmsg) {
(void) printf("Invalid block specifier: %s - %s\n",
thing, errmsg);
goto done;
}
tmp = NULL;
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
len = strlen(flagstr);
for (i = 0; i < len; i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
goto done;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
lsize = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (lsize == -1) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect invalid block pointer. If invalid, try
* decompressing.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_ONLY) == B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
lsize = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (lsize == -1 || zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_null(NULL, spa, NULL,
NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf(
"%12s\t"
"cksum=%016llx:%016llx:%016llx:%016llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp = {{{{0}}}};
unsigned long long *words = (void *)&bp;
char *buf;
int err;
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
zdb_exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
zdb_exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
zdb_exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
/* check for valid hex or decimal numeric string */
static boolean_t
zdb_numeric(char *str)
{
int i = 0, len;
len = strlen(str);
if (len == 0)
return (B_FALSE);
if (strncmp(str, "0x", 2) == 0 || strncmp(str, "0X", 2) == 0)
i = 2;
for (; i < len; i++) {
if (!isxdigit(str[i]))
return (B_FALSE);
}
return (B_TRUE);
}
static int
dummy_get_file_info(dmu_object_type_t bonustype, const void *data,
zfs_file_info_t *zoi)
{
(void) data, (void) zoi;
if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
return (ENOENT);
(void) fprintf(stderr, "dummy_get_file_info: not implemented");
abort();
}
int
main(int argc, char **argv)
{
int c;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
struct sigaction action;
boolean_t force_import = B_FALSE;
boolean_t config_path_console = B_FALSE;
char pbuf[MAXPATHLEN];
dprintf_setup(&argc, argv);
/*
* Set up signal handlers, so if we crash due to bad on-disk data we
* can get more info. Unlike ztest, we don't bail out if we can't set
* up signal handlers, because zdb is very useful without them.
*/
action.sa_handler = sig_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
if (sigaction(SIGSEGV, &action, NULL) < 0) {
(void) fprintf(stderr, "zdb: cannot catch SIGSEGV: %s\n",
strerror(errno));
}
if (sigaction(SIGABRT, &action, NULL) < 0) {
(void) fprintf(stderr, "zdb: cannot catch SIGABRT: %s\n",
strerror(errno));
}
/*
* If the SPA_CONFIG_PATH environment variable is set, it overrides the
* default spa_config_path setting. If the -U flag is also specified, it
* in turn overrides the environment variable.
*/
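/*
* Illustrative precedence check (reader's addition; the paths are
* placeholders): running
*
* SPA_CONFIG_PATH=/tmp/alt-zpool.cache zdb -U /etc/zfs/zpool.cache tank
*
* ends up using /etc/zfs/zpool.cache, because -U is applied during
* option parsing, after the environment variable has been read.
*/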
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
struct option long_options[] = {
{"ignore-assertions", no_argument, NULL, 'A'},
{"block-stats", no_argument, NULL, 'b'},
{"backup", no_argument, NULL, 'B'},
{"checksum", no_argument, NULL, 'c'},
{"config", no_argument, NULL, 'C'},
{"datasets", no_argument, NULL, 'd'},
{"dedup-stats", no_argument, NULL, 'D'},
{"exported", no_argument, NULL, 'e'},
{"embedded-block-pointer", no_argument, NULL, 'E'},
{"automatic-rewind", no_argument, NULL, 'F'},
{"dump-debug-msg", no_argument, NULL, 'G'},
{"history", no_argument, NULL, 'h'},
{"intent-logs", no_argument, NULL, 'i'},
{"inflight", required_argument, NULL, 'I'},
{"checkpointed-state", no_argument, NULL, 'k'},
{"key", required_argument, NULL, 'K'},
{"label", no_argument, NULL, 'l'},
{"disable-leak-tracking", no_argument, NULL, 'L'},
{"metaslabs", no_argument, NULL, 'm'},
{"metaslab-groups", no_argument, NULL, 'M'},
{"numeric", no_argument, NULL, 'N'},
{"option", required_argument, NULL, 'o'},
{"object-lookups", no_argument, NULL, 'O'},
{"path", required_argument, NULL, 'p'},
{"parseable", no_argument, NULL, 'P'},
{"skip-label", no_argument, NULL, 'q'},
{"copy-object", no_argument, NULL, 'r'},
{"read-block", no_argument, NULL, 'R'},
{"io-stats", no_argument, NULL, 's'},
{"simulate-dedup", no_argument, NULL, 'S'},
{"txg", required_argument, NULL, 't'},
{"brt-stats", no_argument, NULL, 'T'},
{"uberblock", no_argument, NULL, 'u'},
{"cachefile", required_argument, NULL, 'U'},
{"verbose", no_argument, NULL, 'v'},
{"verbatim", no_argument, NULL, 'V'},
{"dump-blocks", required_argument, NULL, 'x'},
{"extreme-rewind", no_argument, NULL, 'X'},
{"all-reconstruction", no_argument, NULL, 'Y'},
{"livelist", no_argument, NULL, 'y'},
{"zstd-headers", no_argument, NULL, 'Z'},
{0, 0, 0, 0}
};
while ((c = getopt_long(argc, argv,
"AbBcCdDeEFGhiI:kK:lLmMNo:Op:PqrRsSt:TuU:vVx:XYyZ",
long_options, NULL)) != -1) {
switch (c) {
case 'b':
case 'B':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'N':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'T':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'K':
dump_opt[c]++;
key_material = strdup(optarg);
/* redact key material in process table */
while (*optarg != '\0') { *optarg++ = '*'; }
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
memcpy(tmp, searchdirs, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
config_path_console = B_TRUE;
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = 256 * 1024 * 1024;
#endif
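/*
* Reader's note on the arithmetic, assuming the usual SPA_MAXBLOCKSHIFT
* of 24 (16 MB maximum block size): 2ULL << 24 pins zfs_arc_min at
* 32 MB, alongside the explicit 256 MB zfs_arc_max above.
*/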
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
/*
* ZDB should have ability to read spacemaps.
*/
spa_mode_readable_spacemaps = B_TRUE;
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("ABeEFkKlLNOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_set_assert_ok((dump_opt['A'] == 1) || (dump_opt['A'] > 2));
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
target = argv[0];
/*
* Automate cachefile
*/
if (!spa_config_path_env && !config_path_console && target &&
libzfs_core_init() == 0) {
char *pname = strdup(target);
const char *value;
nvlist_t *pnvl = NULL;
nvlist_t *vnvl = NULL;
if (strpbrk(pname, "/@") != NULL)
*strpbrk(pname, "/@") = '\0';
if (pname && lzc_get_props(pname, &pnvl) == 0) {
if (nvlist_lookup_nvlist(pnvl, "cachefile",
&vnvl) == 0) {
value = fnvlist_lookup_string(vnvl,
ZPROP_VALUE);
} else {
value = "-";
}
strlcpy(pbuf, value, sizeof (pbuf));
if (pbuf[0] != '\0') {
if (pbuf[0] == '/') {
if (access(pbuf, F_OK) == 0)
spa_config_path = pbuf;
else
force_import = B_TRUE;
} else if ((strcmp(pbuf, "-") == 0 &&
access(ZPOOL_CACHE, F_OK) != 0) ||
strcmp(pbuf, "none") == 0) {
force_import = B_TRUE;
}
}
nvlist_free(vnvl);
}
free(pname);
nvlist_free(pnvl);
libzfs_core_fini();
}
dmu_objset_register_type(DMU_OST_ZFS, dummy_get_file_info);
kernel_init(SPA_MODE_READ);
kernel_init_done = B_TRUE;
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
error = 0;
goto fini;
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
error = 0;
goto fini;
}
usage();
}
if (dump_opt['l']) {
error = dump_label(argv[0]);
goto fini;
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
/* -N implies -d */
if (dump_opt['N'] && dump_opt['d'] == 0)
dump_opt['d'] = dump_opt['N'];
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
/*
* See if an objset ID was supplied (-d <pool>/<objset ID>).
* To disambiguate tank/100, consider the 100 as objsetID
* if -N was given, otherwise 100 is an objsetID iff
* tank/100 as a named dataset fails on lookup.
*/
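/*
* For example (illustrative names): "zdb -d -N tank/100" always treats
* 100 as an objset ID, whereas "zdb -d tank/100" first tries a dataset
* literally named tank/100 and only falls back to objset ID 100 if that
* lookup fails.
*/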
objset_str = strchr(target, '/');
if (objset_str && strlen(objset_str) > 1 &&
zdb_numeric(objset_str + 1)) {
char *endptr;
errno = 0;
objset_str++;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
if (dump_opt['N'])
dataset_lookup = B_TRUE;
}
/* normal dataset name not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
error = 1;
goto fini;
}
} else {
target_pool = target;
}
if (dump_opt['e'] || force_import) {
importargs_t args = { 0 };
/*
* If path is not provided, search in /dev
*/
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *), UMEM_NOFAIL);
searchdirs[nsearch++] = (char *)ZFS_DEVDIR;
}
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
libpc_handle_t lpch = {
.lpc_lib_handle = NULL,
.lpc_ops = &libzpool_config_ops,
.lpc_printerr = B_TRUE
};
error = zpool_find_config(&lpch, target_pool, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* We need to make sure to process -O option or call
* dump_path after the -e option has been processed,
* which imports the pool to the namespace if it's
* not in the cachefile.
*/
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
error = dump_path(argv[0], argv[1], NULL);
goto fini;
}
if (dump_opt['r']) {
target_is_spa = B_FALSE;
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
if (error != 0)
fatal("internal error: %s", strerror(error));
}
/*
* import_checkpointed_state makes the assumption that the
* target pool that we pass it is already part of the spa
* namespace. Because of that we need to make sure to call
* it always after the -e option has been processed, which
* imports the pool to the namespace if it's not in the
* cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
} else if (target_is_spa || dump_opt['R'] || dump_opt['B'] ||
objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
goto fini;
} else {
target_pool = strdup(target);
if (strpbrk(target, "/@") != NULL)
*strpbrk(target_pool, "/@") = '\0';
zdb_set_skip_mmp(target);
/*
* If -N was supplied, the user has indicated that
* zdb -d <pool>/<objsetID> is in effect. Otherwise
* we first assume that the dataset string is the
* dataset name. If dmu_objset_hold fails with the
* dataset string, and we have an objset_id, retry the
* lookup with the objsetID.
*/
boolean_t retry = B_TRUE;
retry_lookup:
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target_pool, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0) {
if (objset_id > 0 && retry) {
int err = dmu_objset_hold(target, FTAG,
&os);
if (err) {
dataset_lookup = B_TRUE;
retry = B_FALSE;
goto retry_lookup;
} else {
dmu_objset_rele(os, FTAG);
}
}
error = open_objset(target, FTAG, &os);
}
if (error == 0)
spa = dmu_objset_spa(os);
free(target_pool);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
const char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ?: "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
if (dump_opt['B']) {
dump_backup(target, objset_id,
argc > 0 ? argv[0] : NULL);
} else if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
fini:
if (spa != NULL)
zdb_ddt_cleanup(spa);
if (os != NULL) {
close_objset(os, FTAG);
} else if (spa != NULL) {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
if (kernel_init_done)
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 506427a10672..5fcf0991de66 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -1,13743 +1,13743 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2024 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, 2023, Klara Inc.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <string.h>
#include <math.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"
#include "statcommon.h"
libzfs_handle_t *g_zfs;
static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
static int zpool_do_ddt_prune(int, char **);
static int zpool_do_help(int argc, char **argv);
static zpool_compat_status_t zpool_do_load_compat(
const char *, boolean_t *);
enum zpool_options {
ZPOOL_OPTION_POWER = 1024,
ZPOOL_OPTION_ALLOW_INUSE,
ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
ZPOOL_OPTION_POOL_KEY_GUID,
ZPOOL_OPTION_JSON_NUMS_AS_INT,
ZPOOL_OPTION_JSON_FLAT_VDEVS
};
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_CHECKPOINT,
HELP_DDT_PRUNE,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_PREFETCH,
HELP_REPLACE,
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
HELP_RESILVER,
HELP_TRIM,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostats"
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
NULL},
};
static const char *pool_scan_func_str[] = {
"NONE",
"SCRUB",
"RESILVER",
"ERRORSCRUB"
};
static const char *pool_scan_state_str[] = {
"NONE",
"SCANNING",
"FINISHED",
"CANCELED",
"ERRORSCRUBBING"
};
static const char *vdev_rebuild_state_str[] = {
"NONE",
"ACTIVE",
"CANCELED",
"COMPLETE"
};
static const char *checkpoint_state_str[] = {
"NONE",
"EXISTS",
"DISCARDING"
};
static const char *vdev_state_str[] = {
"UNKNOWN",
"CLOSED",
"OFFLINE",
"REMOVED",
"CANT_OPEN",
"FAULTED",
"DEGRADED",
"ONLINE"
};
static const char *vdev_aux_str[] = {
"NONE",
"OPEN_FAILED",
"CORRUPT_DATA",
"NO_REPLICAS",
"BAD_GUID_SUM",
"TOO_SMALL",
"BAD_LABEL",
"VERSION_NEWER",
"VERSION_OLDER",
"UNSUP_FEAT",
"SPARED",
"ERR_EXCEEDED",
"IO_FAILURE",
"BAD_LOG",
"EXTERNAL",
"SPLIT_POOL",
"BAD_ASHIFT",
"EXTERNAL_PERSIST",
"ACTIVE",
"CHILDREN_OFFLINE",
"ASHIFT_TOO_BIG"
};
static const char *vdev_init_state_str[] = {
"NONE",
"ACTIVE",
"CANCELED",
"SUSPENDED",
"COMPLETE"
};
static const char *vdev_trim_state_str[] = {
"NONE",
"ACTIVE",
"CANCELED",
"SUSPENDED",
"COMPLETE"
};
#define ZFS_NICE_TIMESTAMP 100
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just do a highbit64(a)
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
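/*
* Worked example (reader's note): with only the latency-histogram bit set,
* a & IOS_ANYHISTO_M == IOS_L_HISTO_M == (1ULL << IOS_L_HISTO), so
* highbit64() returns IOS_L_HISTO + 1 and the macro yields IOS_L_HISTO (3),
* suitable as a row index into tables such as vsx_type_to_nvlist above.
*/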
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "version", zpool_do_version, HELP_VERSION },
{ NULL },
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
{ "prefetch", zpool_do_prefetch, HELP_PREFETCH },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
{ "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ "trim", zpool_do_trim, HELP_TRIM },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
{ NULL },
{ "ddtprune", zpool_do_ddt_prune, HELP_DDT_PRUNE },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
#define VDEV_ALLOC_CLASS_LOGS "logs"
#define MAX_CMD_LEN 256
static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-afgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [[-n] interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
"[--json-int, --json-pool-key-guid]] ...\n"
"\t [-T d|u] [pool] [interval [count]]\n"));
case HELP_PREFETCH:
return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
"\t -t ddt <pool>\n"));
case HELP_OFFLINE:
return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
"<device> ...\n"));
case HELP_ONLINE:
return (gettext("\tonline [--power][-e] <pool> <device> "
"...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-fsw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-e | -s | -p | -C] [-w] "
"<pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
return (gettext("\tstatus [--power] [-j [--json-int, "
"--json-flat-vdevs, ...\n"
"\t --json-pool-key-guid]] [-c [script1,script2,...]] "
"[-dDegiLpPstvx] ...\n"
"\t [-T d|u] [pool] [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-j [--json-int, "
"--json-pool-key-guid]] ...\n"
"\t [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool>\n"
"\tset <vdev_property=value> <pool> <vdev>\n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid [-g guid] <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion [-j]\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
case HELP_DDT_PRUNE:
return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
default:
__builtin_unreachable();
}
}
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
uint_t children = 0;
nvlist_t **child;
uint_t i;
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (children == 0) {
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
VDEV_NAME_PATH);
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
strcmp(path, VDEV_TYPE_HOLE) != 0)
fnvlist_add_boolean(res, path);
free(path);
return;
}
for (i = 0; i < children; i++) {
zpool_collect_leaves(zhp, child[i], res);
}
}
/*
* Callback routine that will print out a pool property value.
*/
static int
print_pool_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Callback routine that will print out a vdev property value.
*/
static int
print_vdev_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
if (vdev_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (vdev_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", vdev_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
* '/dev/disk/by-vdev/L5'.
*/
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
if (vdev_nv == NULL) {
return (NULL);
}
return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}
static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
return (zpool_power(zhp, vdev, B_TRUE));
}
static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
int rc;
rc = zpool_power_on(zhp, vdev);
if (rc != 0)
return (rc);
zpool_disk_wait(vdev_name_to_path(zhp, vdev));
return (0);
}
static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
nvlist_t *nv;
const char *path = NULL;
int rc;
/* Power up all the devices first */
FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
if (path != NULL) {
rc = zpool_power_on(zhp, (char *)path);
if (rc != 0) {
return (rc);
}
}
}
/*
* Wait for their devices to show up. Since we powered them on
* at roughly the same time, they should all come online around
* the same time.
*/
FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
zpool_disk_wait(path);
}
return (0);
}
static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
return (zpool_power(zhp, vdev, B_FALSE));
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp,
gettext("\nFor further help on a command or topic, "
"run: %s\n"), "zpool help [<topic>]");
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp, "%s",
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
if (current_prop_type == ZFS_TYPE_POOL) {
(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
(void) fprintf(fp, "\t%-19s ", "feature@...");
(void) fprintf(fp, "YES "
"disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties "
"must be appended with a feature name.\n"
"See zpool-features(7).\n"));
} else if (current_prop_type == ZFS_TYPE_VDEV) {
(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
}
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -u Uninitialize. Clears initialization state.
* -w Wait. Blocks until initializing has completed.
*/
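/*
* Example invocations (illustrative only; "tank" and the device name are
* placeholders, not taken from this change):
*
* zpool initialize -w tank # start on all leaf vdevs and wait
* zpool initialize -c tank da0 # cancel initializing on one device
*/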
int
zpool_do_initialize(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"uninit", no_argument, NULL, 'u'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "csuw", long_options,
NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_CANCEL;
break;
case 's':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'u':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_UNINIT) {
(void) fprintf(stderr, gettext("-u cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_UNINIT;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c, -s"
"or -u\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
} else {
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
zpool_close(zhp);
return (err);
}
/*
* print a pool vdev config for dry runs
*/
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
const char *match, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
boolean_t printed = B_FALSE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
return;
}
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
const char *class = "";
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole == B_TRUE) {
continue;
}
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
class = VDEV_ALLOC_BIAS_LOG;
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
if (strcmp(match, class) != 0)
continue;
if (!printed && name != NULL) {
(void) printf("\t%*s%s\n", indent, "", name);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, "",
name_flags);
free(vname);
}
}
/*
* Print the list of l2cache devices for dry runs.
*/
static void
print_cache_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "cache");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
/*
* Print the list of spares for dry runs.
*/
static void
print_spare_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "spares");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
boolean_t literal, boolean_t as_int, int format)
{
char buf[256];
if (literal) {
if (!as_int)
snprintf(buf, 256, "%llu", (u_longlong_t)value);
} else {
switch (format) {
case ZFS_NICENUM_1024:
zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
break;
case ZFS_NICENUM_BYTES:
zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
break;
case ZFS_NICENUM_TIME:
zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
break;
case ZFS_NICE_TIMESTAMP:
format_timestamp(value, buf, 256);
break;
default:
fprintf(stderr, "Invalid number format");
exit(1);
}
}
if (as_int)
fnvlist_add_uint64(item, key, value);
else
fnvlist_add_string(item, key, buf);
}
/*
* Generates an nvlist with the output version for every command, based on
* the given parameters. The purpose is to version the JSON output, since the
* schema format for each command may be updated in the future.
*
* Schema:
*
* "output_version": {
* "command": string,
* "vers_major": integer,
* "vers_minor": integer,
* }
*/
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
char cmd[MAX_CMD_LEN];
nvlist_t *sch = fnvlist_alloc();
nvlist_t *ov = fnvlist_alloc();
snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
fnvlist_add_string(ov, "command", cmd);
fnvlist_add_uint32(ov, "vers_major", maj_v);
fnvlist_add_uint32(ov, "vers_minor", min_v);
fnvlist_add_nvlist(sch, "output_version", ov);
fnvlist_free(ov);
return (sch);
}
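/*
* Illustrative example only (command name and version numbers are
* hypothetical): when invoked from "zpool status" with maj_v = 0 and
* min_v = 1, the nvlist built above renders as JSON roughly like:
*
*   "output_version": {
*       "command": "zpool status",
*       "vers_major": 0,
*       "vers_minor": 1
*   }
*/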
static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
boolean_t as_int)
{
nvlist_t *config = zpool_get_config(zhp, NULL);
uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);
fnvlist_add_string(list, "name", zpool_get_name(zhp));
if (addtype)
fnvlist_add_string(list, "type", "POOL");
fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
if (as_int) {
if (guid)
fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
if (txg)
fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
} else {
char value[ZFS_MAXPROPLEN];
if (guid) {
snprintf(value, ZFS_MAXPROPLEN, "%llu",
(u_longlong_t)guid);
fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
}
if (txg) {
snprintf(value, ZFS_MAXPROPLEN, "%llu",
(u_longlong_t)txg);
fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
}
fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
}
}
static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
spare_cbdata_t spare_cb;
verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) != 0) {
fnvlist_add_string(list, "used_by",
zpool_get_name(spare_cb.cb_zhp));
}
zpool_close(spare_cb.cb_zhp);
}
}
static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
boolean_t addtype, boolean_t as_int)
{
boolean_t l2c = B_FALSE;
const char *path, *phys, *devid, *bias = NULL;
uint64_t hole = 0, log = 0, spare = 0;
vdev_stat_t *vs;
uint_t c;
nvlist_t *nvdev;
nvlist_t *nvdev_parent = NULL;
char *_name;
if (strcmp(name, zpool_get_name(zhp)) != 0)
_name = name;
else
_name = (char *)"root-0";
nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);
fnvlist_add_string(list, "name", name);
if (addtype)
fnvlist_add_string(list, "type", "VDEV");
if (nvdev) {
const char *type = fnvlist_lookup_string(nvdev,
ZPOOL_CONFIG_TYPE);
if (type)
fnvlist_add_string(list, "vdev_type", type);
uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
if (guid) {
if (as_int) {
fnvlist_add_uint64(list, "guid", guid);
} else {
char buf[ZFS_MAXPROPLEN];
snprintf(buf, ZFS_MAXPROPLEN, "%llu",
(u_longlong_t)guid);
fnvlist_add_string(list, "guid", buf);
}
}
if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
fnvlist_add_string(list, "path", path);
if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
&phys) == 0)
fnvlist_add_string(list, "phys_path", phys);
if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
&devid) == 0)
fnvlist_add_string(list, "devid", devid);
(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
&spare);
(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
if (hole)
fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
else if (l2c)
fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
else if (spare)
fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
else if (log)
fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
else {
(void) nvlist_lookup_string(nvdev,
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
if (bias != NULL)
fnvlist_add_string(list, "class", bias);
else {
nvdev_parent = NULL;
nvdev_parent = zpool_find_parent_vdev(zhp,
_name, NULL, NULL, NULL);
/*
* With a mirrored special device, the parent
* "mirror" vdev (not the leaf vdevs) has
* ZPOOL_CONFIG_ALLOCATION_BIAS set to "special".
* If we're a leaf vdev in that case, we need to
* look at our parent to see whether it is
* "special" to know whether we are "special" too.
*/
if (nvdev_parent) {
(void) nvlist_lookup_string(
nvdev_parent,
ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias);
}
if (bias != NULL)
fnvlist_add_string(list, "class", bias);
else
fnvlist_add_string(list, "class",
"normal");
}
}
if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0) {
fnvlist_add_string(list, "state",
vdev_state_str[vs->vs_state]);
}
}
}
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
nvlist_t *proplist;
const char *normnm;
const char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
const char *cname =
zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
(!zpool_prop_feature(propname) &&
!zpool_prop_vdev(propname))) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool or vdev property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
/*
* if version is specified, only "legacy" compatibility
* may be requested
*/
if ((prop == ZPOOL_PROP_COMPATIBILITY &&
strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
nvlist_exists(proplist, cname) &&
strcmp(fnvlist_lookup_string(proplist, cname),
ZPOOL_COMPAT_LEGACY) != 0)) {
(void) fprintf(stderr, gettext("when 'version' is "
"specified, the 'compatibility' feature may only "
"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
return (2);
}
if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
zfs_prop_t fsprop = zfs_name_to_prop(propname);
if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
B_FALSE)) {
normnm = zfs_prop_to_name(fsprop);
} else if (zfs_prop_user(propname) ||
zfs_prop_userquota(propname)) {
normnm = propname;
} else {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid filesystem property\n"), propname);
return (2);
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
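/*
* Illustrative usage (property value is hypothetical): parsing "-o ashift=12"
* on the command line ends up calling
* add_prop_list("ashift", "12", &props, B_TRUE), which validates the name,
* normalizes it, and stores the pair in *props. Specifying the same property
* more than once is rejected, except for "cachefile".
*/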
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, const char *propval,
nvlist_t **props)
{
const char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
*
* -a Disable the ashift validation checks
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by make_root_vdev(), which constructs the nvlist needed to pass to
* libzfs.
*/
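/*
* Example (pool and device names are illustrative): a dry run that prints
* the layout which would result from adding a mirror, without modifying
* the pool:
*
*   zpool add -n tank mirror da2 da3
*/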
int
zpool_do_add(int argc, char **argv)
{
boolean_t check_replication = B_TRUE;
boolean_t check_inuse = B_TRUE;
boolean_t dryrun = B_FALSE;
boolean_t check_ashift = B_TRUE;
boolean_t force = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
struct option long_options[] = {
{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
{"allow-replication-mismatch", no_argument, NULL,
ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
{"allow-ashift-mismatch", no_argument, NULL,
ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
!= -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case ZPOOL_OPTION_ALLOW_INUSE:
check_inuse = B_FALSE;
break;
case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
check_replication = B_FALSE;
break;
case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
check_ashift = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
if (force) {
if (!check_inuse || !check_replication || !check_ashift) {
(void) fprintf(stderr, gettext("'-f' option is not "
"allowed with '--allow-replication-mismatch', "
"'--allow-ashift-mismatch', or "
"'--allow-in-use'\n"));
usage(B_FALSE);
}
check_inuse = B_FALSE;
check_replication = B_FALSE;
check_ashift = B_FALSE;
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to make_root_vdev for processing */
nvroot = make_root_vdev(zhp, props, !check_inuse,
check_replication, B_FALSE, dryrun, argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child, **sparechild;
uint_t l2children, sparechildren, c;
char *vname;
boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
name_flags | VDEV_NAME_TYPE_ID);
print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
/* print other classes: 'dedup', 'special', and 'log' */
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", poolnvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
}
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", poolnvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
}
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
/* And finally the spares */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
hadspare = B_TRUE;
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
if (!hadspare)
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev from the pool.
*/
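/*
* Example (names are illustrative): '-n' together with '-p' prints a
* parsable estimate of the memory needed to track the removed device,
* without actually removing it:
*
*   zpool remove -np tank da3
*/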
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
boolean_t stop = B_FALSE;
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if (stop && noop) {
zpool_close(zhp);
(void) fprintf(stderr, gettext("stop request ignored\n"));
return (0);
}
if (stop) {
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
usage(B_FALSE);
}
for (i = 1; i < argc; i++) {
if (noop) {
uint64_t size;
if (zpool_vdev_indirect_size(zhp, argv[i],
&size) != 0) {
ret = 1;
break;
}
if (parsable) {
(void) printf("%s %llu\n",
argv[i], (unsigned long long)size);
} else {
char valstr[32];
zfs_nicenum(size, valstr,
sizeof (valstr));
(void) printf("Memory that will be "
"used after removing %s: %s\n",
argv[i], valstr);
}
} else {
if (zpool_vdev_remove(zhp, argv[i]) != 0)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
return (ret);
}
/*
* Return 1 if a vdev is active (being used in a pool)
* Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
*
* This is useful for checking if a disk in an active pool is offlined or
* faulted.
*/
static int
vdev_is_active(char *vdev_path)
{
int fd;
fd = open(vdev_path, O_EXCL);
if (fd < 0) {
return (1); /* can't open with O_EXCL - disk is active */
}
close(fd);
return (0); /* disk is inactive in the pool */
}
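/*
* Illustrative usage (device path is hypothetical): vdev_is_active("/dev/da3")
* returns 1 while the disk is held open exclusively by an imported pool, and
* 0 once the disk has been offlined or faulted, or the pool exported.
*/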
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for vdevs that are members of
* exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
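/*
* Example (device name is illustrative): clear the label of a disk that was
* a member of an exported pool:
*
*   zpool labelclear -f /dev/da4
*/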
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
(void) strlcpy(vdev, argv[0], sizeof (vdev));
/*
* If we cannot open an absolute path, we quit.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
if ((fd = open(vdev, O_RDWR)) < 0) {
int error;
if (vdev[0] == '/') {
(void) fprintf(stderr, gettext("failed to open "
"%s: %s\n"), vdev, strerror(errno));
return (1);
}
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
if (errno == ENOENT) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try "
"specifying absolute path instead\n"),
argv[0]);
return (1);
}
(void) fprintf(stderr, gettext("failed to open %s:"
" %s\n"), vdev, strerror(errno));
return (1);
}
}
/*
* Flush all dirty pages for the block device. This should not be
* fatal when the device does not support BLKFLSBUF as would be the
* case for a file vdev.
*/
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0) {
(void) fprintf(stderr,
gettext("failed to read label from %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
/*
* We allow the user to run 'zpool labelclear -f'
* on an offlined disk in an active pool. We can check if
* the disk is online by calling vdev_is_active().
*/
if (force && !vdev_is_active(vdev))
break;
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\""),
vdev, zpool_pool_state_to_name(state), name);
if (force) {
(void) fprintf(stderr, gettext(
". Offline the disk first to clear its label."));
}
printf("\n");
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
* Once we get the nvlist back from make_root_vdev(), we either print out the
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
*/
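/*
* Example (names and values are illustrative): a dry run that prints the
* layout which would be created, with an explicit mountpoint and a pool
* property:
*
*   zpool create -n -m /export/tank -o ashift=12 tank mirror da0 da1
*/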
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_pool_features = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *compat = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_pool_features = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 0);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_pool_features = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
if (zpool_name_to_prop(optarg) ==
ZPOOL_PROP_COMPATIBILITY)
compat = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to make_root_vdev for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
* '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
int count = 0;
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message and run
* through all the vdevs in the list and print out in an
* appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
print_vdev_tree(NULL, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
print_cache_list(nvroot, 0);
print_spare_list(nvroot, 0);
ret = 0;
} else {
/*
* Load in feature set.
* Note: if compatibility property not given, we'll have
* NULL, which means 'all features'.
*/
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
goto errout;
/*
* props contains list of features to enable.
* For each feature:
* - remove it if feature@name=disabled
* - leave it there if feature@name=enabled
* - add it if:
* - enable_pool_features (i.e. no '-d' or '-o version')
* - it's supported by the kernel module
* - it's in the requested feature set
* - warn if it's enabled but not in compat
*/
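/*
* For instance (feature name used only as an illustration), with
* '-o feature@zstd_compress=disabled' the pair is removed from props so the
* feature stays disabled, while a feature explicitly enabled on the command
* line but missing from the 'compatibility' set only triggers the warning
* below.
*/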
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
const char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval,
ZFS_FEATURE_DISABLED) == 0) {
(void) nvlist_remove_all(props,
propname);
} else if (strcmp(propval,
ZFS_FEATURE_ENABLED) == 0 &&
!requested_features[i]) {
(void) fprintf(stderr, gettext(
"Warning: feature \"%s\" enabled "
"but is not in specified "
"'compatibility' feature set.\n"),
feat->fi_uname);
}
} else if (
enable_pool_features &&
feat->fi_zfs_mod_supported &&
requested_features[i]) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0) {
ret = zfs_share(pool, NULL);
zfs_commit_shares(NULL);
}
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
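/*
* Example (pool name is illustrative): forcefully unmount any datasets and
* destroy the pool:
*
*   zpool destroy -f tank
*/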
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the export */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
typedef struct export_cbdata {
tpool_t *tpool;
pthread_mutex_t mnttab_lock;
boolean_t force;
boolean_t hardforce;
int retval;
} export_cbdata_t;
typedef struct {
char *aea_poolname;
export_cbdata_t *aea_cbdata;
} async_export_args_t;
/*
* Export one pool
*/
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
/*
* zpool_disable_datasets() is not thread-safe for mnttab access.
* So we serialize access here for the 'zpool export -a' parallel case.
*/
if (cb->tpool != NULL)
pthread_mutex_lock(&cb->mnttab_lock);
int retval = zpool_disable_datasets(zhp, cb->force);
if (cb->tpool != NULL)
pthread_mutex_unlock(&cb->mnttab_lock);
if (retval)
return (1);
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* Asynchronous export request
*/
static void
zpool_export_task(void *arg)
{
async_export_args_t *aea = arg;
zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
if (zhp != NULL) {
int ret = zpool_export_one(zhp, aea->aea_cbdata);
if (ret != 0)
aea->aea_cbdata->retval = ret;
zpool_close(zhp);
} else {
aea->aea_cbdata->retval = 1;
}
free(aea->aea_poolname);
free(aea);
}
/*
* Process an export request in parallel
*/
static int
zpool_export_one_async(zpool_handle_t *zhp, void *data)
{
tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));
/* save pool name since zhp will go out of scope */
aea->aea_poolname = strdup(zpool_get_name(zhp));
aea->aea_cbdata = data;
/* ship off actual export to another thread */
if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
return (errno); /* unlikely */
else
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
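/*
* Examples (pool name is illustrative):
*
*   zpool export tank     export a single pool
*   zpool export -a       export every imported pool, dispatching the
*                         exports in parallel via a thread pool (see below)
*/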
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
cb.tpool = NULL;
cb.retval = 0;
argc -= optind;
argv += optind;
/* The history will be logged as part of the export itself */
log_history = B_FALSE;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
0, NULL);
pthread_mutex_init(&cb.mnttab_lock, NULL);
/* Asynchronously call zpool_export_one using thread pool */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_export_one_async, &cb);
tpool_wait(cb.tpool);
tpool_destroy(cb.tpool);
(void) pthread_mutex_destroy(&cb.mnttab_lock);
return (ret | cb.retval);
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_export_one, &cb);
return (ret);
}
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
static const char *const subtypes[] =
{ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
max = MAX(strlen(name) + depth, max);
free(name);
nvlist_t **child;
uint_t children;
for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
if (nvlist_lookup_nvlist_array(nv, subtypes[i],
&child, &children) == 0)
for (uint_t c = 0; c < children; ++c)
max = MAX(max_width(zhp, child[c], depth + 2,
max, name_flags), max);
return (max);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_unhealthy;
boolean_t cb_print_status;
boolean_t cb_print_slow_ios;
boolean_t cb_print_dio_verify;
boolean_t cb_print_vdev_init;
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
boolean_t cb_print_power;
boolean_t cb_json;
boolean_t cb_flat_vdevs;
nvlist_t *cb_jsobj;
boolean_t cb_json_as_int;
boolean_t cb_json_pool_key_guid;
} status_cbdata_t;
/* Return B_TRUE if str is NULL, empty, or whitespace; B_FALSE otherwise. */
static boolean_t
is_blank_str(const char *str)
{
for (; str != NULL && *str != '\0'; ++str)
if (!isblank(*str))
return (B_FALSE);
return (B_TRUE);
}
static void
zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
nvlist_t *item)
{
vdev_cmd_data_t *data;
int i, j, k = 1;
char tmp[256];
const char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0))
continue;
data = &vcdl->data[i];
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
val = data->lines[k];
break;
}
}
if (val == NULL || is_blank_str(val))
val = "-";
fnvlist_add_string(item, vcdl->uniq_cols[j], val);
}
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
if (data->lines[j]) {
snprintf(tmp, 256, "extra_%d", k++);
fnvlist_add_string(item, tmp,
data->lines[j]);
}
}
break;
}
}
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
{
vdev_cmd_data_t *data;
int i, j;
const char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (val == NULL || is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
fputs(" ", stdout);
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
fputs(" ", stdout);
val = data->lines[j];
fputs(val ?: "", stdout);
}
break;
}
}
/*
* Print vdev initialization status for leaves
*/
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
time_t t = vs->vs_initialize_action_time;
int initialize_pct = 100;
if (vs->vs_initialize_state !=
VDEV_INITIALIZE_COMPLETE) {
initialize_pct = (vs->vs_initialize_bytes_done *
100 / (vs->vs_initialize_bytes_est + 1));
}
(void) ctime_r(&t, tbuf);
tbuf[24] = 0;
switch (vs->vs_initialize_state) {
case VDEV_INITIALIZE_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_INITIALIZE_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_INITIALIZE_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% initialized%s)"),
initialize_pct, zbuf);
} else {
(void) printf(gettext(" (uninitialized)"));
}
} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) printf(gettext(" (initializing)"));
}
}
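/*
* In verbose mode the text appended to the vdev line looks roughly like the
* following (percentage and timestamp are illustrative):
*
*   (44% initialized, started at Mon Jan  6 12:34:56 2025)
*
* The non-verbose form only prints " (initializing)" while initialization
* is active.
*/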
/*
* Print vdev TRIM status for leaves
*/
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
time_t t = vs->vs_trim_action_time;
int trim_pct = 100;
if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
trim_pct = (vs->vs_trim_bytes_done *
100 / (vs->vs_trim_bytes_est + 1));
}
(void) ctime_r(&t, tbuf);
tbuf[24] = 0;
switch (vs->vs_trim_state) {
case VDEV_TRIM_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_TRIM_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_TRIM_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% trimmed%s)"),
trim_pct, zbuf);
} else if (vs->vs_trim_notsup) {
(void) printf(gettext(" (trim unsupported)"));
} else {
(void) printf(gettext(" (untrimmed)"));
}
} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
(void) printf(gettext(" (trimming)"));
}
}
/*
* Return the color associated with a health string. This includes returning
* NULL for no color change.
*/
static const char *
health_str_to_color(const char *health)
{
if (strcmp(health, gettext("FAULTED")) == 0 ||
strcmp(health, gettext("SUSPENDED")) == 0 ||
strcmp(health, gettext("UNAVAIL")) == 0) {
return (ANSI_RED);
}
if (strcmp(health, gettext("OFFLINE")) == 0 ||
strcmp(health, gettext("DEGRADED")) == 0 ||
strcmp(health, gettext("REMOVED")) == 0) {
return (ANSI_YELLOW);
}
return (NULL);
}
/*
* Called for each leaf vdev. Returns 0 if the vdev is healthy.
* A vdev is unhealthy if any of the following are true:
* 1) there are read, write, or checksum errors,
* 2) its state is not ONLINE, or
* 3) slow IO reporting was requested (-s) and there are slow IOs.
*/
static int
vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
{
status_cbdata_t *cb = data;
vdev_stat_t *vs;
uint_t vsc;
(void) hdl_data;
if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) != 0)
return (1);
if (vs->vs_checksum_errors || vs->vs_read_errors ||
vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
return (1);
if (cb->cb_print_slow_ios && vs->vs_slow_ios)
return (1);
return (0);
}
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
nvlist_t **child, *root;
uint_t c, i, vsc, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
const char *type;
const char *path = NULL;
const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
*scolor = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
* For hot spares, we use the terms 'INUSE' and 'AVAIL' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = gettext("INUSE");
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = gettext("AVAIL");
}
/*
* If '-e' is specified then top-level vdevs and their children
* can be pruned if all of their leaves are healthy.
*/
if (cb->cb_print_unhealthy && depth > 0 &&
for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
return;
}
printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
if (vs->vs_read_errors)
rcolor = ANSI_RED;
if (vs->vs_write_errors)
wcolor = ANSI_RED;
if (vs->vs_checksum_errors)
ccolor = ANSI_RED;
if (vs->vs_slow_ios)
scolor = ANSI_BLUE;
if (cb->cb_literal) {
fputc(' ', stdout);
printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
fputc(' ', stdout);
printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
fputc(' ', stdout);
printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
fputc(' ', stdout);
printf_color(rcolor, "%5s", rbuf);
fputc(' ', stdout);
printf_color(wcolor, "%5s", wbuf);
fputc(' ', stdout);
printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
/* Only leaf vdevs have slow IOs */
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
printf_color(scolor, " %5llu",
(u_longlong_t)vs->vs_slow_ios);
else
printf_color(scolor, " %5s", rbuf);
}
if (cb->cb_print_power) {
if (children == 0) {
/* Only leaf vdevs have physical slots */
switch (zpool_power_current_state(zhp, (char *)
fnvlist_lookup_string(nv,
ZPOOL_CONFIG_PATH))) {
case 0:
printf_color(ANSI_RED, " %5s",
gettext("off"));
break;
case 1:
printf(" %5s", gettext("on"));
break;
default:
printf(" %5s", "-");
}
} else {
printf(" %5s", "-");
}
}
if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
cb->cb_print_dio_verify) {
zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
sizeof (dbuf));
if (cb->cb_literal)
printf(" %5llu",
(u_longlong_t)vs->vs_dio_verify_errors);
else
printf(" %5s", dbuf);
}
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" %s %s", gettext("was"), path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
color_start(ANSI_RED);
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ASHIFT_TOO_BIG:
(void) printf(gettext("unsupported minimum blocksize"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
if (vs->vs_read_errors + vs->vs_write_errors +
vs->vs_checksum_errors == 0 && children == 0 &&
vs->vs_slow_ios > 0) {
(void) printf(gettext("too many slow I/Os"));
} else {
(void) printf(gettext("too many errors"));
}
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
color_end();
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
(void) printf(
gettext(" block size: %dB configured, %dB native"),
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
}
if (vs->vs_scan_removing != 0) {
(void) printf(gettext(" (removing)"));
} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
(void) printf(gettext(" (non-allocating)"));
}
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
/*
* If you force fault a drive that's resilvering, its scan stats can
* get frozen in time, giving the false impression that it's
* being resilvered. That's why we check the state to see if the vdev
* is healthy before reporting "resilvering" or "repairing".
*/
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
vs->vs_state == VDEV_STATE_HEALTHY) {
if (vs->vs_scan_processed != 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
} else if (vs->vs_resilver_deferred) {
(void) printf(gettext(" (awaiting resilver)"));
}
}
/* The top-level vdevs have the rebuild stats */
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
if (vs->vs_rebuild_processed != 0) {
(void) printf(gettext(" (resilvering)"));
}
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
/* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
/* Only print normal classes here */
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
/* Provide vdev_rebuild_stats to children if available */
if (vrs == NULL) {
(void) nvlist_lookup_uint64_array(nv,
ZPOOL_CONFIG_REBUILD_STATS,
(uint64_t **)&vrs, &i);
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare, vrs);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
const char *type;
char *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print specialized class vdevs.
*
* These are recorded as top level vdevs in the main pool child array
* but with "is_log" set to 1 or an "alloc_bias" string. We use either
* print_status_config() or print_import_config() to print the top level
* class vdevs; any of their children (e.g. mirrored slogs) are then printed
* recursively, which works because only the top level vdev is marked.
*/
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class)
{
uint_t c, children;
nvlist_t **child;
boolean_t printed = B_FALSE;
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
bias = (char *)VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
(void) printf("\t%s\t\n", gettext(class));
printed = B_TRUE;
}
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE, NULL);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
/*
* Display the status for the given pool.
*/
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
const char *name;
uint64_t guid;
uint64_t hostid = 0;
const char *msgid;
const char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
const char *comment;
const char *indent;
char buf[2048];
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
indent = " ";
} else {
comment = NULL;
indent = "";
}
(void) printf(gettext("%s pool: %s\n"), indent, name);
(void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);
(void) printf(gettext("%s state: %s"), indent, health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
if (reason != ZPOOL_STATUS_OK) {
(void) printf("%s", indent);
printf_color(ANSI_BOLD, gettext("status: "));
}
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_YELLOW, gettext("One or more devices "
"contains corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
printf_color(ANSI_YELLOW, gettext("The pool data is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n"
"\t%s(Note that they may be intentionally disabled if the\n"
"\t%s'compatibility' property is set.)\n"), indent, indent);
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
"the file(s) indicated by the 'compatibility'\n"
"\t%sproperty.\n"), indent);
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"\t%srequested by the 'compatibility' property.\n"),
indent);
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_collect_unsup_feat(config, buf, 2048);
(void) printf("%s", buf);
color_end();
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n"
"\t%scannot be accessed in read-write mode because it uses "
"the following\n"
"\t%sfeature(s) not supported on this system:\n"),
indent, indent);
color_start(ANSI_YELLOW);
zpool_collect_unsup_feat(config, buf, 2048);
(void) printf("%s", buf);
color_end();
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
printf_color(ANSI_YELLOW, gettext("The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n"
"\t%sbe safely imported when the system hostid is not "
"set.\n"), indent);
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
"by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
"be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_YELLOW, gettext("One or more devices were "
"being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\t%sExpect reduced performance.\n"), indent);
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state != VDEV_STATE_HEALTHY ||
reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
(void) printf("%s", indent);
(void) printf(gettext("action: "));
}
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext("The pool can be imported using "
"its name or numeric identifier, though\n"
"\t%ssome features will not be available without "
"an explicit 'zpool upgrade'.\n"), indent);
} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
(void) printf(gettext("The pool can be imported using "
"its name or numeric\n"
"\t%sidentifier, though the file(s) indicated by "
"its 'compatibility'\n"
"\t%sproperty cannot be parsed at this time.\n"),
indent, indent);
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext("The pool can be imported using "
"its name or numeric identifier and\n"
"\t%sthe '-f' flag.\n"), indent);
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext("The pool can be "
"imported using its name or numeric "
"identifier,\n"
"\t%showever there is a compatibility "
"issue which should be corrected\n"
"\t%sby running 'zpool scrub'\n"),
indent, indent);
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext("The pool cannot be "
"imported with this version of ZFS due to\n"
"\t%san active asynchronous destroy. "
"Revert to an earlier version\n"
"\t%sand allow the destroy to complete "
"before updating.\n"), indent, indent);
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("Existing encrypted "
"datasets contain an on-disk "
"incompatibility, which\n"
"\t%sneeds to be corrected. Backup these "
"datasets to new encrypted datasets\n"
"\t%sand destroy the old ones.\n"),
indent, indent);
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext("Existing encrypted "
"snapshots and bookmarks contain an "
"on-disk\n"
"\t%sincompatibility. This may cause "
"on-disk corruption if they are used\n"
"\t%swith 'zfs recv'. To correct the "
"issue, enable the bookmark_v2 feature.\n"
"\t%sNo additional action is needed if "
"there are no encrypted snapshots or\n"
"\t%sbookmarks. If preserving the "
"encrypted snapshots and bookmarks is\n"
"\t%srequired, use a non-raw send to "
"backup and restore them. Alternately,\n"
"\t%sthey may be removed to resolve the "
"incompatibility.\n"), indent, indent,
indent, indent, indent, indent);
break;
default:
/*
* All errata must contain an action message.
*/
assert(errata == ZPOOL_ERRATA_NONE);
}
} else {
(void) printf(gettext("The pool can be imported using "
"its name or numeric identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext("The pool can be imported despite "
"missing or damaged devices. The\n"
"\t%sfault tolerance of the pool may be compromised if "
"imported.\n"), indent);
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext("The pool cannot be imported. "
"Access the pool on a system running newer\n"
"\t%ssoftware, or recreate the pool from "
"backup.\n"), indent);
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
(void) printf(gettext("The pool cannot be imported. "
"Access the pool on a system that supports\n"
"\t%sthe required feature(s), or recreate the pool "
"from backup.\n"), indent);
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
(void) printf(gettext("The pool cannot be imported in "
"read-write mode. Import the pool with\n"
"\t%s'-o readonly=on', access the pool on a system "
"that supports the\n"
"\t%srequired feature(s), or recreate the pool "
"from backup.\n"), indent, indent);
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext("The pool cannot be imported. "
"Attach the missing\n"
"\t%sdevices and try again.\n"), indent);
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext("The pool must be exported from "
"%s (hostid=%"PRIx64")\n"
"\t%sbefore it can be safely imported.\n"),
hostname, hostid, indent);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext("Set a unique system hostid with "
"the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext("The pool cannot be imported due "
"to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (comment != NULL)
(void) printf(gettext("comment: %s\n"), comment);
/*
* If the state is "closed" or "can't open", and the aux state
* is "corrupt data":
*/
if ((vs->vs_state == VDEV_STATE_CLOSED ||
vs->vs_state == VDEV_STATE_CANT_OPEN) &&
vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\t%sThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"),
indent);
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\t%sThe pool may be active on "
"another system, but can be imported using\n"
"\t%sthe '-f' flag.\n"), indent, indent);
}
if (msgid != NULL) {
(void) printf(gettext("%s see: "
"https://openzfs.github.io/openzfs-docs/msg/%s\n"),
indent, msgid);
}
(void) printf(gettext("%sconfig:\n\n"), indent);
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\t%sAdditional devices are known to "
"be part of this pool, though their\n"
"\t%sexact configuration cannot be determined.\n"),
indent, indent);
}
return (0);
}
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
/*
* The hostid on LOAD_INFO comes from the MOS label via
 * spa_tryimport(). If it's not there, then we're likely talking to an
* older kernel, so use the top one, which will be from the label
* discovered in zpool_find_import(), or if a cachefile is in use, the
* local hostid.
*/
if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
&hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags, uint_t mntthreads)
{
int ret = 0;
int ms_status = 0;
zpool_handle_t *zhp;
const char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%"PRIx64")\nExport the pool on the other "
"system, then run 'zpool import'.\n"),
name, hostname, hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
const char *hostname = "<unknown>";
time_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_HOSTNAME);
else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_HOSTID);
else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%"PRIx64") at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
hostid, ctime(&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS &&
zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
ret = 1;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
!(flags & ZFS_IMPORT_ONLY)) {
ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
if (ms_status == EZFS_SHAREFAILED) {
(void) fprintf(stderr, gettext("Import was "
"successful, but unable to share some datasets\n"));
} else if (ms_status == EZFS_MOUNTFAILED) {
(void) fprintf(stderr, gettext("Import was "
"successful, but unable to mount some datasets\n"));
}
}
zpool_close(zhp);
return (ret);
}
typedef struct import_parameters {
nvlist_t *ip_config;
const char *ip_mntopts;
nvlist_t *ip_props;
int ip_flags;
uint_t ip_mntthreads;
int *ip_err;
} import_parameters_t;
static void
do_import_task(void *arg)
{
import_parameters_t *ip = arg;
*ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
free(ip);
}
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name, importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
boolean_t pool_specified = (import->poolname != NULL ||
import->guid != 0);
uint_t npools = 0;
tpool_t *tp = NULL;
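	/*
	 * With -a (import all), each candidate pool is dispatched to a
	 * thread pool and imported in parallel, using at most five worker
	 * threads per online CPU.
	 */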
if (import->do_all) {
tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
0, NULL);
}
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
if (!pool_specified && import->do_all) {
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
npools++;
}
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!import->do_destroyed &&
pool_state == POOL_STATE_DESTROYED)
continue;
if (import->do_destroyed &&
pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!import->do_all)
(void) fputc('\n', stdout);
if (import->do_all) {
import_parameters_t *ip = safe_malloc(
sizeof (import_parameters_t));
ip->ip_config = config;
ip->ip_mntopts = mntopts;
ip->ip_props = props;
ip->ip_flags = flags;
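				/*
				 * Give each parallel import an equal share
				 * of the mount worker threads.
				 */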
ip->ip_mntthreads = mount_tp_nthr / npools;
ip->ip_err = &err;
(void) tpool_dispatch(tp, do_import_task,
(void *)ip);
} else {
/*
* If we're importing from cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* to invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
const char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
if (import->do_all) {
tpool_wait(tp);
tpool_destroy(tp);
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags, mount_tp_nthr);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
} target_exists_args_t;
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
target_exists_args_t *args = data;
nvlist_t *config = zpool_get_config(zhp, NULL);
int found = 0;
if (config == NULL)
return (0);
if (args->poolname != NULL) {
const char *pool_name;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&pool_name) == 0);
if (strcmp(pool_name, args->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0);
if (pool_guid == args->poolguid)
found = 1;
}
zpool_close(zhp);
return (found);
}
/*
* zpool checkpoint <pool>
* checkpoint --discard <pool>
*
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
 * Checkpoints the specified pool by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
*/
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
/* As a special case, check for use of '/' in the name */
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("'zpool checkpoint' "
"doesn't work on datasets. To save the state "
"of a dataset from a specific point in time "
"please use 'zfs snapshot'\n"));
return (1);
}
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
return (err);
}
#define CHECKPOINT_OPT 1024
/*
* zpool prefetch <type> [<type opts>] <pool>
*
 * Prefetches a particular type of data in the specified pool.
*/
int
zpool_do_prefetch(int argc, char **argv)
{
int c;
char *poolname;
char *typestr = NULL;
zpool_prefetch_type_t type;
zpool_handle_t *zhp;
int err = 0;
while ((c = getopt(argc, argv, "t:")) != -1) {
switch (c) {
case 't':
typestr = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if (strcmp(typestr, "ddt") == 0) {
type = ZPOOL_PREFETCH_DDT;
} else {
(void) fprintf(stderr, gettext("unsupported prefetch type\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
err = zpool_prefetch(zhp, type);
zpool_close(zhp);
return (err);
}
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices. If importing from a cachefile config fails, then
 *		fall back to searching for devices only in the directories that
* exist in the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
 *       -D	Scan for previously destroyed pools, or import all or only the
* specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
 *		vdevs in the FAULTED state. In other words, it does a verbatim
* import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
 *       -T	Specify a starting txg to use for import. This is an
* intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
 *       -s	Scan using the default search path; the libblkid cache will
* not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
 * The import command scans for pools to import, and imports pools based on pool
* name and GUID. The pool can also be renamed as part of the import process.
*/
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
struct option long_options[] = {
{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
long_options, NULL)) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case CHECKPOINT_OPT:
flags |= ZFS_IMPORT_CHECKPOINT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
	 *	-D	The above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
/*
* User specified a name or guid. Ensure it's unique.
*/
target_exists_args_t search = {searchname, searchguid};
pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
}
/*
* Check the environment for the preferred search path.
*/
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir, *tmp = NULL;
envdup = strdup(env);
for (dir = strtok_r(envdup, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp)) {
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = dir;
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
idata.policy = policy;
idata.do_destroyed = do_destroyed;
idata.do_all = do_all;
libpc_handle_t lpch = {
.lpc_lib_handle = g_zfs,
.lpc_ops = &libzfs_config_ops,
.lpc_printerr = B_TRUE
};
pools = zpool_search_import(&lpch, &idata);
if (pools != NULL && pool_exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && pool_exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
free(searchdirs);
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);
/*
* If we're using the cachefile and we failed to import, then
	 * fall back to scanning the directory for pools that match
* those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
		 * in the cachefile. If we need to fall back to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(&lpch, &idata);
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
&idata);
}
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
free(searchdirs);
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
* -f (undocumented) force uberblock (and config including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
*
*/
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_sync_one, &force);
return (ret);
}
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_namewidth;
int cb_iteration;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
vdev_cbdata_t cb_vdevs;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
{NULL}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2},
{"trim", 2}, {"rebuild", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
{NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
{"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
{NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {NULL}},
};
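/*
 * The top and bottom labels pair up by column count: e.g. for IOS_DEFAULT,
 * "capacity" (2 columns) sits over "alloc"/"free", "operations" over the
 * first "read"/"write" pair, and "bandwidth" over the second.
 */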
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
 * Return the number of labels in a null-terminated name_and_columns_t
 * array.
 */
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
 * const char *foo[] = {"bar", "baz", NULL}
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
[IOS_RQ_HISTO] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
/*
* Print the column labels, i.e:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
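			/*
			 * Example: with column_width = 5 and columns = 2,
			 * rw_column_width = 5 * 2 + 2 = 12; for the 10-char
			 * label "operations", text_start = 12 / 2 - 10 / 2 = 1,
			 * so the label starts one space into its span.
			 */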
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width) / columns -
slen / columns);
if (text_start < 0)
text_start = 0;
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
if (spaces_to_end < 0)
spaces_to_end = 0;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
 * If the user specified "zpool status|iostat -c", then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
color_start(ANSI_BOLD);
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
color_end();
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
 * Prints a size string (e.g. 120M) with the suffix ("M") colored
* by order of magnitude. Uses column_size to add padding.
*/
static void
print_stat_color(const char *statbuf, unsigned int column_size)
{
fputs(" ", stdout);
size_t len = strlen(statbuf);
while (len < column_size) {
fputc(' ', stdout);
column_size--;
}
if (*statbuf == '0') {
color_start(ANSI_GRAY);
fputc('0', stdout);
} else {
for (; *statbuf; statbuf++) {
if (*statbuf == 'K') color_start(ANSI_GREEN);
else if (*statbuf == 'M') color_start(ANSI_YELLOW);
else if (*statbuf == 'G') color_start(ANSI_RED);
else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
else if (*statbuf == 'E') color_start(ANSI_CYAN);
fputc(*statbuf, stdout);
if (--column_size <= 0)
break;
}
}
color_end();
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
print_stat_color(buf, column_size);
}
/*
* Calculate the default vdev stats
*
 * Subtract oldvs from newvs and save the resulting stats into calcvs. Any
 * scaling factor is applied later, when the stats are printed.
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
* to process. In order to make single uint64_t's look like arrays, we set
* __data to the stat data, and then set *data = &__data with count = 1. Then,
* we can just use *data and count.
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
* Helper function to lookup a uint64_t array or uint64_t value and store its
* data as a stat_array. If the nvpair is a single uint64_t value, then we make
* it look like a one element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
* You must free the returned array after you are done with it with
* free_calc_stats().
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
		/* Start at 512 bytes (2^9, bucket 9) - req size should never be lower */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
*/
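		/*
		 * (1UL << i) + ((1UL << i) / 2) is that midpoint, 1.5 * 2^i;
		 * e.g. i = 3 gives 8 + 4 = 12ns for the 8ns-15ns bucket.
		 */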
if (histo[i] != 0) {
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
{
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (int i = 0; i < ARRAY_SIZE(names); i++) {
uint64_t val = nva[i].data[0];
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
static const char *const class_name[] = {
VDEV_ALLOC_BIAS_DEDUP,
VDEV_ALLOC_BIAS_SPECIAL,
VDEV_ALLOC_CLASS_LOGS
};
/*
 * Print out all the statistics for the given vdev. This can be called with the
 * toplevel configuration, or recursively. If 'name' is NULL, then this
* is a verbose output, and we don't want to display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return (ret);
calcvs = safe_malloc(sizeof (*calcvs));
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
	 * Print the vdev name unless it is a histogram. Histograms
* display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
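	/*
	 * E.g. a 2-second sampling interval gives tdelta ~= 2 * NANOSEC and
	 * scale ~= 0.5, turning the raw per-interval deltas into per-second
	 * rates when multiplied below.
	 */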
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, newnv);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
const char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
/*
* print normal top-level devices
*/
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* print all other top-level devices
*/
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
!cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0,
class_name[n]);
}
printf("\n");
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose &&
!cb->cb_vdevs.cb_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
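/*
 * Return the current terminal width in columns. Falls back to 80 if the
 * ioctl fails, and to 999 when stdout is not a terminal.
 */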
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
/*
* Return the required length of the pool/vdev name column. The minimum
* allowed width and output formatting flags must be provided.
*/
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
nvlist_t *config, *nvroot;
int width = min_width;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
size_t poolname_len = strlen(zpool_get_name(zhp));
if (verbose == B_FALSE) {
width = MAX(poolname_len, min_width);
} else {
width = MAX(poolname_len,
max_width(zhp, nvroot, 0, min_width, flags));
}
}
return (width);
}
/*
* Parse the input arguments and extract the 'interval' and 'count' values, if present.
*/
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
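/*
 * Translate the -T option argument into a timestamp format: 'u' selects
 * Unix time, 'd' selects date(1) format; anything else prints usage.
 */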
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
* Return stat flags that are supported on all pools by both the module and
* zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
*/
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
/* Default stats are always supported, but for completeness.. */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
/*
* Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
uint64_t guid;
vdev_cbdata_t *cb = cb_data;
zpool_handle_t *zhp = zhp_data;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (0);
return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
}
/*
* Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_names for a second... */
tmp_name = cb->cb_names;
/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_names = tmp_name;
return (ret);
}
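/*
 * for_each_pool() callback: return 1 if this pool's name matches *data.
 */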
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
is_pool_cb, name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_vdevs.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
&cb->cb_vdevs)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
&cb->cb_vdevs)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
/*
* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
* if we were unable to determine its size.
*/
static int
terminal_height(void)
{
struct winsize win;
if (isatty(STDOUT_FILENO) == 0)
return (-1);
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
return (win.ws_row);
return (-1);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, (char *)"-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* print all the files and directories within directory */
while ((ent = readdir(dir)) != NULL) {
if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
dirpath, ent->d_name) >= sizeof (fullpath)) {
(void) fprintf(stderr,
gettext("internal error: "
"ZPOOL_SCRIPTS_PATH too large.\n"));
exit(1);
}
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(const char *subcommand)
{
char *dir, *sp, *tmp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
for (dir = strtok_r(sp, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp))
print_zpool_dir_scripts(dir);
free(sp);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 10,
* but may be as large as the column width - 42 so it still fits on one line.
* NOTE: 42 is the width of the default capacity/operations/bandwidth output
*/
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
int width, available_width;
/*
* get_namewidth() returns the maximum width of any name in that column
* for any pool/vdev/device line that will be output.
*/
width = get_namewidth(zhp, cb->cb_namewidth,
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
/*
* The width we are calculating is the width of the header and also the
* padding width for names that are less than maximum width. The stats
* take up 42 characters, so the width available for names is:
*/
available_width = get_columns() - 42;
/*
* If the maximum width fits on a screen, then great! Make everything
* line up by justifying all lines to the same width. If that max
* width is larger than what's available, the name plus stats won't fit
* on one line, and justifying to that width would cause every line to
* wrap on the screen. We only want lines with long names to wrap.
* Limit the padding to what won't wrap.
*/
if (width > available_width)
width = available_width;
/*
* And regardless of what the screen width is (get_columns can
* return 0 if the width is not known, or less than 42 for a narrow
* terminal), make the width a minimum of 10.
*/
if (width < 10)
width = 10;
/* Save the calculated width */
cb->cb_namewidth = width;
return (0);
}
/*
* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
* -n Only print headers once
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
boolean_t headers_once = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'n':
headers_once = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
/* All the args are vdevs */
cb.cb_vdevs.cb_names = argv;
cb.cb_vdevs.cb_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
/* ...and the rest are vdev names */
cb.cb_vdevs.cb_names = argv + 1;
cb.cb_vdevs.cb_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or a list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdevs.cb_names_count != 0) {
/*
* If the user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
&ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
* Histogram tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
* width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE,
get_namewidth_iostat, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
cb.cb_vdevs.cb_names_count,
cb.cb_vdevs.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* Check terminal size so we can print headers
* even when terminal window has its height
* changed.
*/
winheight = terminal_height();
/*
* Are we connected to a TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
/*
* If it's the first time and we're not skipping it, or if exactly
* one of the skip and verbose flags is set, print the header.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose) ||
(!headers_once &&
(cb.cb_iteration % winheight) == 0)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fflush(stdout);
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdevs.cb_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fflush(stdout);
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
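/*
 * Callback data shared by the 'zpool list' output routines.
 */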
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_json;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
nvlist_t *cb_jsobj;
boolean_t cb_json_as_int;
boolean_t cb_json_pool_key_guid;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) fputs(" ", stdout);
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) fputs(header, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) fputc('\n', stdout);
}
/*
* Given a pool and a list of properties, print out (or, in JSON mode, collect)
* all the properties according to the described layout. Used by zpool_do_list().
*/
static void
collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
const char *propstr;
boolean_t right_justify;
size_t width;
zprop_source_t sourcetype = ZPROP_SRC_NONE;
nvlist_t *item, *d, *props;
item = d = props = NULL;
if (cb->cb_json) {
item = fnvlist_alloc();
props = fnvlist_alloc();
d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
if (d == NULL) {
fprintf(stderr, "pools obj not found.\n");
exit(1);
}
fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
}
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!cb->cb_json && !first) {
if (cb->cb_scripted)
(void) fputc('\t', stdout);
else
(void) fputs(" ", stdout);
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), &sourcetype,
cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
sourcetype = ZPROP_SRC_LOCAL;
} else if (zfs_prop_user(pl->pl_user_prop) &&
zpool_get_userprop(zhp, pl->pl_user_prop, property,
sizeof (property), &sourcetype) == 0) {
propstr = property;
} else {
propstr = "-";
}
if (cb->cb_json) {
if (pl->pl_prop == ZPOOL_PROP_NAME)
continue;
const char *prop_name;
if (pl->pl_prop != ZPROP_USERPROP)
prop_name = zpool_prop_to_name(pl->pl_prop);
else
prop_name = pl->pl_user_prop;
(void) zprop_nvlist_one_property(
prop_name, propstr,
sourcetype, NULL, NULL, props, cb->cb_json_as_int);
} else {
/*
* If this is being called in scripted mode, or if this
* is the last column and it is left-justified, don't
* include a width format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL &&
!right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
}
if (cb->cb_json) {
fnvlist_add_nvlist(item, "properties", props);
if (cb->cb_json_pool_key_guid) {
char pool_guid[256];
uint64_t guid = fnvlist_lookup_uint64(
zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_POOL_GUID);
snprintf(pool_guid, 256, "%llu",
(u_longlong_t)guid);
fnvlist_add_nvlist(d, pool_guid, item);
} else {
fnvlist_add_nvlist(d, zpool_get_name(zhp),
item);
}
fnvlist_free(props);
fnvlist_free(item);
} else
(void) fputc('\n', stdout);
}
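/*
 * Format a single vdev property value (or the supplied string for the
 * health property) and either print it or add it to the JSON nvlist.
 */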
static void
collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
boolean_t json, nvlist_t *nvl, boolean_t as_int)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
case ZPOOL_PROP_DEDUPRATIO:
case ZPOOL_PROP_DEDUPCACHED:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
/* capacity value is in parts-per-10,000 (aka permyriad) */
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value / 100);
else
(void) snprintf(propval, sizeof (propval),
value < 1000 ? "%1.2f%%" : value < 10000 ?
"%2.1f%%" : "%3.0f%%", value / 100.0);
break;
case ZPOOL_PROP_HEALTH:
width = 8;
(void) strlcpy(propval, str, sizeof (propval));
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (json) {
zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
} else {
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
}
/*
* print static default line per vdev
* not compatible with '-o' <proplist> option
*/
static void
collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children = 0;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
props = ent = ch = obj = sp = l2c = NULL;
const char *dashes = "%-*s - - - - "
"- - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
const char *state;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
if (cb->cb_json) {
props = fnvlist_alloc();
ent = fnvlist_alloc();
fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
cb->cb_json_as_int);
} else {
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to collect_vdev_prop()
* to indicate that the value is valid.
*/
if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
scripted, B_TRUE, format, cb->cb_json, props,
cb->cb_json_as_int);
} else {
collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
scripted, toplevel, format, cb->cb_json, props,
cb->cb_json_as_int);
}
collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
scripted, toplevel, format, cb->cb_json, props,
cb->cb_json_as_int);
collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
NULL, scripted, toplevel, format, cb->cb_json, props,
cb->cb_json_as_int);
collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
cb->cb_json, props, cb->cb_json_as_int);
collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
scripted, B_TRUE, format, cb->cb_json, props,
cb->cb_json_as_int);
collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, NULL, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format, cb->cb_json, props, cb->cb_json_as_int);
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
scripted, toplevel, format, cb->cb_json, props,
cb->cb_json_as_int);
collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
scripted, toplevel, format, cb->cb_json, props,
cb->cb_json_as_int);
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
if (cb->cb_json) {
fnvlist_add_nvlist(ent, "properties", props);
fnvlist_free(props);
} else
(void) fputc('\n', stdout);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (cb->cb_json) {
fnvlist_add_nvlist(item, name, ent);
fnvlist_free(ent);
}
return;
}
if (cb->cb_json) {
ch = fnvlist_alloc();
}
/* list the normal vdevs first */
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (name == NULL || cb->cb_json != B_TRUE)
collect_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE, item);
else if (cb->cb_json) {
collect_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE, ch);
}
free(vname);
}
if (cb->cb_json) {
if (!nvlist_empty(ch))
fnvlist_add_nvlist(ent, "vdevs", ch);
fnvlist_free(ch);
}
/* list the classes: 'logs', 'dedup', and 'special' */
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
if (cb->cb_json)
obj = fnvlist_alloc();
for (c = 0; c < children; c++) {
const char *bias = NULL;
const char *type = NULL;
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) == 0 && islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed && !cb->cb_json) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
collect_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE, obj);
free(vname);
}
if (cb->cb_json) {
if (!nvlist_empty(obj))
fnvlist_add_nvlist(item, class_name[n], obj);
fnvlist_free(obj);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
if (cb->cb_json) {
l2c = fnvlist_alloc();
} else {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
}
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
collect_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE, l2c);
free(vname);
}
if (cb->cb_json) {
if (!nvlist_empty(l2c))
fnvlist_add_nvlist(item, "l2cache", l2c);
fnvlist_free(l2c);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
if (cb->cb_json) {
sp = fnvlist_alloc();
} else {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
}
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
collect_list_stats(zhp, vname, child[c], cb, depth + 2,
B_TRUE, sp);
free(vname);
}
if (cb->cb_json) {
if (!nvlist_empty(sp))
fnvlist_add_nvlist(item, "spares", sp);
fnvlist_free(sp);
}
}
if (name != NULL && cb->cb_json) {
fnvlist_add_nvlist(item, name, ent);
fnvlist_free(ent);
}
}
/*
* Generic callback function to list a pool.
*/
static int
list_callback(zpool_handle_t *zhp, void *data)
{
nvlist_t *p, *d, *nvdevs;
uint64_t guid;
char pool_guid[256];
const char *pool_name = zpool_get_name(zhp);
list_cbdata_t *cbp = data;
p = d = nvdevs = NULL;
collect_pool(zhp, cbp);
if (cbp->cb_verbose) {
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (cbp->cb_json) {
d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
"pools");
if (cbp->cb_json_pool_key_guid) {
guid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID);
snprintf(pool_guid, 256, "%llu",
(u_longlong_t)guid);
p = fnvlist_lookup_nvlist(d, pool_guid);
} else {
p = fnvlist_lookup_nvlist(d, pool_name);
}
nvdevs = fnvlist_alloc();
}
collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
if (cbp->cb_json) {
fnvlist_add_nvlist(p, "vdevs", nvdevs);
if (cbp->cb_json_pool_key_guid)
fnvlist_add_nvlist(d, pool_guid, p);
else
fnvlist_add_nvlist(d, pool_name, p);
fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
fnvlist_free(nvdevs);
}
}
return (0);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 9,
* but may be as large as needed.
*/
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cb = data;
int width;
width = get_namewidth(zhp, cb->cb_namewidth,
cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
if (width < 9)
width = 9;
cb->cb_namewidth = width;
return (0);
}
/*
* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
* -j Display the output in JSON format
* --json-int Display the numbers as integer instead of strings.
* --json-pool-key-guid Set pool GUID as key for pool objects.
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as a health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
"capacity,dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
nvlist_t *data = NULL;
current_prop_type = ZFS_TYPE_POOL;
struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
{"json-pool-key-guid", no_argument, NULL,
ZPOOL_OPTION_POOL_KEY_GUID},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
NULL)) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'j':
cb.cb_json = B_TRUE;
break;
case ZPOOL_OPTION_JSON_NUMS_AS_INT:
cb.cb_json_as_int = B_TRUE;
cb.cb_literal = B_TRUE;
break;
case ZPOOL_OPTION_POOL_KEY_GUID:
cb.cb_json_pool_key_guid = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
cb.cb_namewidth = 8; /* 8 until precalc is avail */
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!cb.cb_json && cb.cb_json_as_int) {
(void) fprintf(stderr, gettext("'--json-int' only works with"
" '-j' option\n"));
usage(B_FALSE);
}
if (!cb.cb_json && cb.cb_json_pool_key_guid) {
(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
" works with '-j' option\n"));
usage(B_FALSE);
}
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
if (cb.cb_json) {
cb.cb_jsobj = zpool_json_schema(0, 1);
data = fnvlist_alloc();
fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
fnvlist_free(data);
}
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
if (timestamp_fmt != NODATE) {
if (cb.cb_json) {
if (cb.cb_json_as_int) {
fnvlist_add_uint64(cb.cb_jsobj, "time",
time(NULL));
} else {
char ts[128];
get_timestamp(timestamp_fmt, ts, 128);
fnvlist_add_string(cb.cb_jsobj, "time",
ts);
}
} else
print_timestamp(timestamp_fmt);
}
if (!cb.cb_scripted && (first || cb.cb_verbose) &&
!cb.cb_json) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (ret == 0 && cb.cb_json)
zcmd_print_json(cb.cb_jsobj);
else if (ret != 0 && cb.cb_json)
nvlist_free(cb.cb_jsobj);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fflush(stdout);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t rebuild = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:sw")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 's':
rebuild = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
rebuild);
if (ret == 0 && wait) {
zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
char raidz_prefix[] = "raidz";
if (replacing) {
activity = ZPOOL_WAIT_REPLACE;
} else if (strncmp(old_disk,
raidz_prefix, strlen(raidz_prefix)) == 0) {
activity = ZPOOL_WAIT_RAIDZ_EXPAND;
}
ret = zpool_wait(zhp, activity);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for replacing to complete before returning
*
* Replace <device> with <new_device>.
*/
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for resilvering (mirror) or expansion (raidz) to complete
* before returning.
*
* Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
* mirror or raidz. If <device> is not part of a mirror, then <device> will
* be transformed into a mirror of <device> and <new_device>. When a mirror
* is involved, <new_device> will begin life with a DTL of [0, now], and will
* immediately begin to resilver itself. For the raidz case, an expansion will
* commence and reflow the raidz data across all the disks including the
* <new_device>.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
* Restrictions: the top level of the pool must only be made up of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
int ms_status = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
print_vdev_tree(NULL, "dedup", config, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", config, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
ms_status = zpool_enable_datasets(zhp, mntopts, 0,
mount_tp_nthr);
if (ms_status == EZFS_SHAREFAILED) {
(void) fprintf(stderr, gettext("Split was successful, "
"datasets are mounted but sharing of some datasets "
"has failed\n"));
} else if (ms_status == EZFS_MOUNTFAILED) {
(void) fprintf(stderr, gettext("Split was successful"
", but some datasets could not be mounted\n"));
(void) fprintf(stderr, gettext("Try doing '%s' with a "
"different altroot\n"), "zpool import");
}
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* zpool online [--power] <pool> <device> ...
*
* --power: Power on the enclosure slot to the drive (if possible)
*/
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
boolean_t is_power_on = B_FALSE;
struct option long_options[] = {
{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
case ZPOOL_OPTION_POWER:
is_power_on = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
is_power_on = B_TRUE;
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("failed to open pool "
"\"%s\""), poolname);
return (1);
}
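/* Bring each named device online, powering on its slot first if requested. */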
for (i = 1; i < argc; i++) {
vdev_state_t oldstate;
boolean_t avail_spare, l2cache;
int rc;
if (is_power_on) {
rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
if (rc == ENOTSUP) {
(void) fprintf(stderr,
gettext("Power control not supported\n"));
}
if (rc != 0)
return (rc);
}
nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
&l2cache, NULL);
if (tgt == NULL) {
ret = 1;
(void) fprintf(stderr, gettext("couldn't find device "
"\"%s\" in pool \"%s\"\n"), argv[i], poolname);
continue;
}
uint_t vsc;
oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
if ((rc = zpool_vdev_online(zhp, argv[i], flags,
&newstate)) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
if ((flags & ZFS_ONLINE_EXPAND)) {
(void) printf(gettext("%s: failed "
"to expand usable space on "
"unhealthy device '%s'\n"),
(oldstate >= VDEV_STATE_DEGRADED ?
"error" : "warning"), argv[i]);
if (oldstate >= VDEV_STATE_DEGRADED) {
ret = 1;
break;
}
}
}
} else {
(void) fprintf(stderr, gettext("Failed to online "
"\"%s\" in pool \"%s\": %d\n"),
argv[i], poolname, rc);
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool offline [-ft]|[--power] <pool> <device> ...
*
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
*
* --power Power off the enclosure slot to the drive (if possible)
*/
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
boolean_t is_power_off = B_FALSE;
struct option long_options[] = {
{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
case ZPOOL_OPTION_POWER:
is_power_off = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (is_power_off && fault) {
(void) fprintf(stderr,
gettext("--power and -f cannot be used together\n"));
usage(B_FALSE);
return (1);
}
if (is_power_off && istmp) {
(void) fprintf(stderr,
gettext("--power and -t cannot be used together\n"));
usage(B_FALSE);
return (1);
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("failed to open pool "
"\"%s\""), poolname);
return (1);
}
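/* Power off, fault, or offline each named device as requested. */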
for (i = 1; i < argc; i++) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
if (is_power_off) {
/*
* Note: we have to power off first, then set REMOVED,
* or else zpool_vdev_set_removed_state() returns
* EAGAIN.
*/
ret = zpool_power_off(zhp, argv[i]);
if (ret != 0) {
(void) fprintf(stderr, "%s %s %d\n",
gettext("unable to power off slot for"),
argv[i], ret);
}
zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
} else if (fault) {
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool clear [-nF]|[--power] <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
*/
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t is_power_on = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
struct option long_options[] = {
{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, "FnX", long_options,
NULL)) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case ZPOOL_OPTION_POWER:
is_power_on = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
is_power_on = B_TRUE;
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0) {
return (1);
}
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
if (is_power_on) {
if (device == NULL) {
zpool_power_on_pool_and_wait_for_devices(zhp);
} else {
zpool_power_on_and_disk_wait(zhp, device);
}
}
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
/*
* zpool reguid [-g <guid>] <pool>
*/
int
zpool_do_reguid(int argc, char **argv)
{
uint64_t guid;
uint64_t *guidp = NULL;
int c;
char *endptr;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "g:")) != -1) {
switch (c) {
case 'g':
errno = 0;
guid = strtoull(optarg, &endptr, 10);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid GUID: %s\n"), optarg);
usage(B_FALSE);
}
guidp = &guid;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_set_guid(zhp, guidp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen <pool>
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
*/
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_reopen_one, &scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
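/*
 * Return B_TRUE if the pool's config shows a checkpoint that exists or is
 * currently being discarded, B_FALSE otherwise.
 */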
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
pool_checkpoint_stat_t *pcs = NULL;
uint_t c;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return (B_FALSE);
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
return (B_TRUE);
}
return (B_FALSE);
}
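/*
 * for_each_pool() callback: start, pause, or stop a scan on a single pool.
 * Unavailable pools are skipped, and a warning is printed when a scrub
 * will not cover blocks that belong to an existing checkpoint.
 */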
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
if (err == 0 && zpool_has_checkpoint(zhp) &&
cb->cb_type == POOL_SCAN_SCRUB) {
(void) printf(gettext("warning: will not scrub state that "
"belongs to the checkpoint of pool '%s'\n"),
zpool_get_name(zhp));
}
return (err != 0);
}
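/*
 * for_each_pool() callback: block until the given activity has completed
 * on the pool.
 */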
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
zpool_wait_activity_t *act = data;
return (zpool_wait(zhp, *act));
}
/*
* zpool scrub [-e | -s | -p | -C] [-w] <pool> ...
*
* -e Only scrub blocks in the error log.
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pause in-progress scrub.
* -w Wait. Blocks until scrub has completed.
* -C Scrub from last saved txg.
*/
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
boolean_t is_error_scrub = B_FALSE;
boolean_t is_pause = B_FALSE;
boolean_t is_stop = B_FALSE;
boolean_t is_txg_continue = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "spweC")) != -1) {
switch (c) {
case 'e':
is_error_scrub = B_TRUE;
break;
case 's':
is_stop = B_TRUE;
break;
case 'p':
is_pause = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case 'C':
is_txg_continue = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (is_pause && is_stop) {
(void) fprintf(stderr, gettext("invalid option "
- "combination :-s and -p are mutually exclusive\n"));
+ "combination: -s and -p are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_pause && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
- "combination :-p and -C are mutually exclusive\n"));
+ "combination: -p and -C are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_stop && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
- "combination :-s and -C are mutually exclusive\n"));
+ "combination: -s and -C are mutually exclusive\n"));
usage(B_FALSE);
} else if (is_error_scrub && is_txg_continue) {
(void) fprintf(stderr, gettext("invalid option "
- "combination :-e and -C are mutually exclusive\n"));
+ "combination: -e and -C are mutually exclusive\n"));
usage(B_FALSE);
} else {
if (is_error_scrub)
cb.cb_type = POOL_SCAN_ERRORSCRUB;
if (is_pause) {
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
} else if (is_stop) {
cb.cb_type = POOL_SCAN_NONE;
} else if (is_txg_continue) {
cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
} else {
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
}
}
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, wait_callback, &act);
}
return (error);
}
/*
* zpool resilver <pool> ...
*
* Restarts any in-progress resilver
*/
int
zpool_do_resilver(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb));
}
/*
 * zpool trim [-d] [-r <rate>] [-c | -s] [-w] <pool> [<device> ...]
*
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
* adding a multiplier suffix such as 'k' or 'm'.
* -s Suspend. TRIM can then be restarted with no flags.
* -w Wait. Blocks until trimming has completed.
*/
int
zpool_do_trim(int argc, char **argv)
{
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"secure", no_argument, NULL, 'd'},
{"rate", required_argument, NULL, 'r'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_trim_func_t cmd_type = POOL_TRIM_START;
uint64_t rate = 0;
boolean_t secure = B_FALSE;
boolean_t wait = B_FALSE;
int c;
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
!= -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_CANCEL;
break;
case 'd':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
secure = B_TRUE;
break;
case 'r':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-r cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
(void) fprintf(stderr, "%s: %s\n",
gettext("invalid value for rate"),
libzfs_error_description(g_zfs));
usage(B_FALSE);
}
break;
case 's':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
char *poolname = argv[0];
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
trimflags_t trim_flags = {
.secure = secure,
.rate = rate,
.wait = wait,
};
nvlist_t *vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
trim_flags.fullpool = B_TRUE;
} else {
trim_flags.fullpool = B_FALSE;
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
fnvlist_free(vdevs);
zpool_close(zhp);
return (error);
}
/*
 * Converts a total number of seconds to a human-readable string broken
 * down into days/hours/minutes/seconds.
*/
static void
secs_to_dhms(uint64_t total, char *buf)
{
uint64_t days = total / 60 / 60 / 24;
uint64_t hours = (total / 60 / 60) % 24;
uint64_t mins = (total / 60) % 60;
uint64_t secs = (total % 60);
if (days > 0) {
(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
(u_longlong_t)days, (u_longlong_t)hours,
(u_longlong_t)mins, (u_longlong_t)secs);
} else {
(void) sprintf(buf, "%02llu:%02llu:%02llu",
(u_longlong_t)hours, (u_longlong_t)mins,
(u_longlong_t)secs);
}
}
/*
* Print out detailed error scrub status.
*/
static void
print_err_scrub_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t total_secs_left;
uint64_t secs_left, mins_left, hours_left, days_left;
uint64_t examined, to_be_examined;
if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
return;
}
(void) printf(gettext(" scrub: "));
start = ps->pss_error_scrub_start;
end = ps->pss_error_scrub_end;
pause = ps->pss_pass_error_scrub_pause;
examined = ps->pss_error_scrub_examined;
to_be_examined = ps->pss_error_scrub_to_be_examined;
assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
if (ps->pss_error_scrub_state == DSS_FINISHED) {
total_secs_left = end - start;
days_left = total_secs_left / 60 / 60 / 24;
hours_left = (total_secs_left / 60 / 60) % 24;
mins_left = (total_secs_left / 60) % 60;
secs_left = (total_secs_left % 60);
(void) printf(gettext("scrubbed %llu error blocks in %llu days "
"%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
(u_longlong_t)days_left, (u_longlong_t)hours_left,
(u_longlong_t)mins_left, (u_longlong_t)secs_left,
ctime(&end));
return;
} else if (ps->pss_error_scrub_state == DSS_CANCELED) {
(void) printf(gettext("error scrub canceled on %s"),
ctime(&end));
return;
}
assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
/* Error scrub is in progress. */
if (pause == 0) {
(void) printf(gettext("error scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("error scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\terror scrub started on %s"),
ctime(&start));
}
double fraction_done = (double)examined / (to_be_examined + examined);
(void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
" blocks"), 100 * fraction_done, (u_longlong_t)examined);
(void) printf("\n");
}
/*
* Print out detailed scrub status.
*/
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
uint64_t elapsed, scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
assert(is_resilver || is_scrub);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
secs_to_dhms(end - start, time_buf);
if (is_scrub) {
(void) printf(gettext("scrub repaired %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
} else if (is_resilver) {
(void) printf(gettext("resilvered %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (is_scrub) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (is_resilver) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (is_scrub) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (is_resilver) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total_s = ps->pss_to_examine;
total_i = ps->pss_to_examine - ps->pss_skipped;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total_i;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
/* do not print estimated time if we have a paused scrub */
(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
if (pause == 0 && scan_rate > 0) {
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
(void) printf(gettext(" at %s/s"), srate_buf);
}
(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
if (pause == 0 && issue_rate > 0) {
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
(void) printf(gettext(" at %s/s"), irate_buf);
}
(void) printf(gettext("\n"));
if (is_resilver) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (is_scrub) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
/*
* Only provide an estimate iff:
* 1) we haven't yet issued all we expected, and
* 2) the issue rate exceeds 10 MB/s, and
* 3) it's either:
* a) a resilver which has started repairs, or
* b) a scrub which has entered the issue phase.
*/
if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
((is_resilver && ps->pss_processed > 0) ||
(is_scrub && issued > 0))) {
secs_to_dhms((total_i - issued) / issue_rate, time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
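/*
 * Print the progress of a single top-level vdev rebuild (sequential
 * resilver), including scan and issue rates and an estimated completion
 * time.
 */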
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
return;
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
uint64_t bytes_issued = vrs->vrs_bytes_issued;
uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
uint64_t bytes_est_s = vrs->vrs_bytes_est;
uint64_t bytes_est_i = vrs->vrs_bytes_est;
if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
bytes_est_i -= vrs->vrs_pass_bytes_skipped;
uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
(vrs->vrs_pass_time_ms + 1)) * 1000;
uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
(vrs->vrs_pass_time_ms + 1)) * 1000;
double scan_pct = MIN((double)bytes_scanned * 100 /
(bytes_est_s + 1), 100);
/* Format all of the numbers we will be reporting */
char bytes_scanned_buf[7], bytes_issued_buf[7];
char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
sizeof (bytes_scanned_buf));
zfs_nicebytes(bytes_issued, bytes_issued_buf,
sizeof (bytes_issued_buf));
zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
sizeof (bytes_rebuilt_buf));
zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
time_t start = vrs->vrs_start_time;
time_t end = vrs->vrs_end_time;
/* Rebuild is finished or canceled. */
if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
(void) printf(gettext("resilvered (%s) %s in %s "
"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
(void) printf(gettext("resilver (%s) canceled on %s"),
vdev_name, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
(void) printf(gettext("resilver (%s) in progress since %s"),
vdev_name, ctime(&start));
}
assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
bytes_est_s_buf);
if (scan_rate > 0) {
zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
(void) printf(gettext(" at %s/s"), scan_rate_buf);
}
(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
bytes_est_i_buf);
if (issue_rate > 0) {
zfs_nicebytes(issue_rate, issue_rate_buf,
sizeof (issue_rate_buf));
(void) printf(gettext(" at %s/s"), issue_rate_buf);
}
(void) printf(gettext("\n"));
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
bytes_rebuilt_buf, scan_pct);
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
if (bytes_est_s >= bytes_scanned &&
scan_rate >= 10 * 1024 * 1024) {
secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
/*
* Print rebuild status for top-level vdevs.
*/
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
char *name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
print_rebuild_status_impl(vrs, i, name);
free(name);
}
}
}
/*
* As we don't scrub checkpointed blocks, we want to warn the user that we
* skipped scanning some blocks if a checkpoint exists or existed at any
* time during the scan. If a sequential instead of healing reconstruction
* was performed then the blocks were reconstructed. However, their checksums
* have not been verified so we still print the warning.
*/
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
if (ps == NULL || pcs == NULL)
return;
if (pcs->pcs_state == CS_NONE ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
return;
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
if (ps->pss_state == DSS_NONE)
return;
if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
ps->pss_end_time < pcs->pcs_start_time)
return;
if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
(void) printf(gettext(" scan warning: skipped blocks "
"that are only referenced by the checkpoint.\n"));
} else {
assert(ps->pss_state == DSS_SCANNING);
(void) printf(gettext(" scan warning: skipping blocks "
"that are only referenced by the checkpoint.\n"));
}
}
/*
* Returns B_TRUE if there is an active rebuild in progress. Otherwise,
* B_FALSE is returned and 'rebuild_end_time' is set to the end time for
* the last completed (or cancelled) rebuild.
*/
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
nvlist_t **child;
uint_t children;
boolean_t rebuilding = B_FALSE;
uint64_t end_time = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
if (vrs->vrs_end_time > end_time)
end_time = vrs->vrs_end_time;
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
rebuilding = B_TRUE;
end_time = 0;
break;
}
}
}
if (rebuild_end_time != NULL)
*rebuild_end_time = end_time;
return (rebuilding);
}
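/*
 * Collect the statistics for a single vdev (error counts, space usage,
 * initialize/trim state, and so on) into an nvlist for JSON output,
 * recursing into its children.
 */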
static void
vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
int depth, boolean_t isspare, char *parent, nvlist_t *item)
{
nvlist_t *vds, **child, *ch = NULL;
uint_t vsc, children;
vdev_stat_t *vs;
char *vname;
uint64_t notpresent;
const char *type, *path;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
if (cb->cb_print_unhealthy && depth > 0 &&
for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
return;
}
vname = zpool_vdev_name(g_zfs, zhp, nv,
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
vds = fnvlist_alloc();
fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
if (cb->cb_flat_vdevs && parent != NULL) {
fnvlist_add_string(vds, "parent", parent);
}
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED) {
fnvlist_add_string(vds, "state", "INUSE");
used_by_other(zhp, nv, vds);
} else if (vs->vs_state == VDEV_STATE_HEALTHY)
fnvlist_add_string(vds, "state", "AVAIL");
} else {
if (vs->vs_alloc) {
nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
}
if (vs->vs_space) {
nice_num_str_nvlist(vds, "total_space", vs->vs_space,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
}
if (vs->vs_dspace) {
nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
}
if (vs->vs_rsize) {
nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
}
if (vs->vs_esize) {
nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
}
if (vs->vs_self_healed) {
nice_num_str_nvlist(vds, "self_healed",
vs->vs_self_healed, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}
if (vs->vs_pspace) {
nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
}
nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(vds, "checksum_errors",
vs->vs_checksum_errors, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_1024);
if (vs->vs_scan_processed) {
nice_num_str_nvlist(vds, "scan_processed",
vs->vs_scan_processed, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}
if (vs->vs_checkpoint_space) {
nice_num_str_nvlist(vds, "checkpoint_space",
vs->vs_checkpoint_space, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}
if (vs->vs_resilver_deferred) {
nice_num_str_nvlist(vds, "resilver_deferred",
vs->vs_resilver_deferred, B_TRUE,
cb->cb_json_as_int, ZFS_NICENUM_1024);
}
if (children == 0) {
nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
}
if (cb->cb_print_power) {
if (children == 0) {
/* Only leaf vdevs have physical slots */
switch (zpool_power_current_state(zhp, (char *)
fnvlist_lookup_string(nv,
ZPOOL_CONFIG_PATH))) {
case 0:
fnvlist_add_string(vds, "power_state",
"off");
break;
case 1:
fnvlist_add_string(vds, "power_state",
"on");
break;
default:
fnvlist_add_string(vds, "power_state",
"-");
}
} else {
fnvlist_add_string(vds, "power_state", "-");
}
}
}
if (cb->cb_print_dio_verify) {
nice_num_str_nvlist(vds, "dio_verify_errors",
vs->vs_dio_verify_errors, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_1024);
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
fnvlist_add_string(vds, "was",
fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
} else if (vs->vs_aux != VDEV_AUX_NONE) {
fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
nice_num_str_nvlist(vds, "configured_ashift",
vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(vds, "physical_ashift",
vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
ZFS_NICENUM_1024);
}
if (vs->vs_scan_removing != 0) {
nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
path, vds);
}
}
if (children == 0) {
if (cb->cb_print_vdev_init) {
if (vs->vs_initialize_state != 0) {
uint64_t st = vs->vs_initialize_state;
fnvlist_add_string(vds, "init_state",
vdev_init_state_str[st]);
nice_num_str_nvlist(vds, "initialized",
vs->vs_initialize_bytes_done,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(vds, "to_initialize",
vs->vs_initialize_bytes_est,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(vds, "init_time",
vs->vs_initialize_action_time,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(vds, "init_errors",
vs->vs_initialize_errors,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
} else {
fnvlist_add_string(vds, "init_state",
"UNINITIALIZED");
}
}
if (cb->cb_print_vdev_trim) {
if (vs->vs_trim_notsup == 0) {
if (vs->vs_trim_state != 0) {
uint64_t st = vs->vs_trim_state;
fnvlist_add_string(vds, "trim_state",
vdev_trim_state_str[st]);
nice_num_str_nvlist(vds, "trimmed",
vs->vs_trim_bytes_done,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(vds, "to_trim",
vs->vs_trim_bytes_est,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(vds, "trim_time",
vs->vs_trim_action_time,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(vds, "trim_errors",
vs->vs_trim_errors,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
} else
fnvlist_add_string(vds, "trim_state",
"UNTRIMMED");
}
nice_num_str_nvlist(vds, "trim_notsup",
vs->vs_trim_notsup, B_TRUE,
cb->cb_json_as_int, ZFS_NICENUM_1024);
}
} else {
ch = fnvlist_alloc();
}
if (cb->cb_flat_vdevs && children == 0) {
fnvlist_add_nvlist(item, vname, vds);
}
for (int c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
if (cb->cb_flat_vdevs) {
vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
vname, item);
}
vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
vname, ch);
}
if (ch != NULL) {
if (!nvlist_empty(ch))
fnvlist_add_nvlist(vds, "vdevs", ch);
fnvlist_free(ch);
}
fnvlist_add_nvlist(item, vname, vds);
fnvlist_free(vds);
free(vname);
}
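/*
 * Collect stats for the root vdev children that belong to the given
 * allocation class (log, special, or dedup) into 'item'.
 */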
static void
class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class, nvlist_t *item)
{
uint_t c, children;
nvlist_t **child;
nvlist_t *class_obj = NULL;
if (!cb->cb_flat_vdevs)
class_obj = fnvlist_alloc();
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
bias = (char *)VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (cb->cb_flat_vdevs) {
vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
NULL, item);
} else {
vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
NULL, class_obj);
}
free(name);
}
if (!cb->cb_flat_vdevs) {
if (!nvlist_empty(class_obj))
fnvlist_add_nvlist(item, class, class_obj);
fnvlist_free(class_obj);
}
}
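/*
 * Collect stats for the pool's L2ARC cache devices into 'item'.
 */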
static void
l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
nvlist_t *item)
{
nvlist_t *l2c = NULL, **l2cache;
uint_t nl2cache;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (nl2cache == 0)
return;
if (!cb->cb_flat_vdevs)
l2c = fnvlist_alloc();
for (int i = 0; i < nl2cache; i++) {
if (cb->cb_flat_vdevs) {
vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
B_FALSE, NULL, item);
} else {
vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
B_FALSE, NULL, l2c);
}
}
}
if (!cb->cb_flat_vdevs) {
if (!nvlist_empty(l2c))
fnvlist_add_nvlist(item, "l2cache", l2c);
fnvlist_free(l2c);
}
}
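/*
 * Collect stats for the pool's hot spares into 'item'.
 */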
static void
spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
nvlist_t *item)
{
nvlist_t *sp = NULL, **spares;
uint_t nspares;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (nspares == 0)
return;
if (!cb->cb_flat_vdevs)
sp = fnvlist_alloc();
for (int i = 0; i < nspares; i++) {
if (cb->cb_flat_vdevs) {
vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
NULL, item);
} else {
vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
NULL, sp);
}
}
}
if (!cb->cb_flat_vdevs) {
if (!nvlist_empty(sp))
fnvlist_add_nvlist(item, "spares", sp);
fnvlist_free(sp);
}
}
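/*
 * Add the pool's persistent error count to 'item' and, in verbose mode,
 * the list of file paths with permanent errors.
 */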
static void
errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
uint64_t nerr;
nvlist_t *config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
if (nerr != 0 && cb->cb_verbose) {
nvlist_t *nverrlist = NULL;
if (zpool_get_errlog(zhp, &nverrlist) == 0) {
int i = 0;
int count = 0;
size_t len = MAXPATHLEN * 2;
nvpair_t *elem = NULL;
for (nvpair_t *pair =
nvlist_next_nvpair(nverrlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(nverrlist, pair))
count++;
char **errl = (char **)malloc(
count * sizeof (char *));
while ((elem = nvlist_next_nvpair(nverrlist,
elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem,
&nv) == 0);
verify(nvlist_lookup_uint64(nv,
ZPOOL_ERR_DATASET, &dsobj) == 0);
verify(nvlist_lookup_uint64(nv,
ZPOOL_ERR_OBJECT, &obj) == 0);
errl[i] = safe_malloc(len);
zpool_obj_to_path(zhp, dsobj, obj,
errl[i++], len);
}
nvlist_free(nverrlist);
fnvlist_add_string_array(item, "errlist",
(const char **)errl, count);
for (int i = 0; i < count; ++i)
free(errl[i]);
free(errl);
} else
fnvlist_add_string(item, "errlist",
strerror(errno));
}
}
}
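/*
 * Convert a single ddt_stat_t into name/value pairs on 'item'.
 */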
static void
ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
{
nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}
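/*
 * When dedup stats were requested, add the DDT object stats, table totals,
 * and histogram to 'item'.
 */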
static void
dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
nvlist_t *config;
if (cb->cb_dedup_stats) {
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
nvlist_t *ddt_stat, *ddt_obj, *dedup;
uint_t c;
uint64_t cspace_prop;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_uint64_array(config,
ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
return;
dedup = fnvlist_alloc();
ddt_obj = fnvlist_alloc();
nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
if (ddo->ddo_count == 0) {
fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
ddt_obj);
fnvlist_add_nvlist(item, "dedup_stats", dedup);
fnvlist_free(ddt_obj);
fnvlist_free(dedup);
return;
} else {
nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
/*
* Squash cached size into in-core size to handle race.
* Only include cached size if it is available.
*/
cspace_prop = zpool_get_prop_int(zhp,
ZPOOL_PROP_DEDUPCACHED, NULL);
cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
nice_num_str_nvlist(dedup, "cspace", cspace_prop,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
}
ddt_stat = fnvlist_alloc();
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0) {
nvlist_t *total = fnvlist_alloc();
if (dds->dds_blocks == 0)
fnvlist_add_string(total, "blocks", "0");
else
ddt_stats_nvlist(dds, cb, total);
fnvlist_add_nvlist(ddt_stat, "total", total);
fnvlist_free(total);
}
if (nvlist_lookup_uint64_array(config,
ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
nvlist_t *hist = fnvlist_alloc();
nvlist_t *entry = NULL;
char buf[16];
for (int h = 0; h < 64; h++) {
if (ddh->ddh_stat[h].dds_blocks != 0) {
entry = fnvlist_alloc();
ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
entry);
snprintf(buf, 16, "%d", h);
fnvlist_add_nvlist(hist, buf, entry);
fnvlist_free(entry);
}
}
if (!nvlist_empty(hist))
fnvlist_add_nvlist(ddt_stat, "histogram", hist);
fnvlist_free(hist);
}
if (!nvlist_empty(ddt_obj)) {
fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
ddt_obj);
}
fnvlist_free(ddt_obj);
if (!nvlist_empty(ddt_stat)) {
fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
ddt_stat);
}
fnvlist_free(ddt_stat);
if (!nvlist_empty(dedup))
fnvlist_add_nvlist(item, "dedup_stats", dedup);
fnvlist_free(dedup);
}
}
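/*
 * Add RAID-Z expansion progress (state, times, and bytes reflowed) for the
 * expanding top-level vdev to 'item'.
 */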
static void
raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
nvlist_t *nvroot, nvlist_t *item)
{
uint_t c;
pool_raidz_expand_stat_t *pres = NULL;
if (nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
nvlist_t **child;
uint_t children;
nvlist_t *nv = fnvlist_alloc();
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(pres->pres_expanding_vdev < children);
char *name =
zpool_vdev_name(g_zfs, zhp,
child[pres->pres_expanding_vdev], 0);
fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
fnvlist_add_string(nv, "state",
pool_scan_state_str[pres->pres_state]);
nice_num_str_nvlist(nv, "expanding_vdev",
pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "waiting_for_resilver",
pres->pres_waiting_for_resilver, B_TRUE,
cb->cb_json_as_int, ZFS_NICENUM_1024);
fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
fnvlist_free(nv);
free(name);
}
}
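/*
 * Add the pool checkpoint state, creation time, and space used to 'item'.
 */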
static void
checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
nvlist_t *item)
{
uint_t c;
pool_checkpoint_stat_t *pcs = NULL;
if (nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_string(nv, "state",
checkpoint_state_str[pcs->pcs_state]);
nice_num_str_nvlist(nv, "start_time",
pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "space",
pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
fnvlist_free(nv);
}
}
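/*
 * Add device removal progress (state, times, bytes copied, and mapping
 * memory) to 'item'.
 */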
static void
removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
nvlist_t *nvroot, nvlist_t *item)
{
uint_t c;
pool_removal_stat_t *prs = NULL;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
(uint64_t **)&prs, &c) == 0) {
if (prs->prs_state != DSS_NONE) {
nvlist_t **child;
uint_t children;
verify(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
assert(prs->prs_removing_vdev < children);
char *vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
nvlist_t *nv = fnvlist_alloc();
fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
cb->cb_json_as_int);
fnvlist_add_string(nv, "state",
pool_scan_state_str[prs->prs_state]);
nice_num_str_nvlist(nv, "removing_vdev",
prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(nv, "start_time",
prs->prs_start_time, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "copied", prs->prs_copied,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "mapping_memory",
prs->prs_mapping_memory, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_BYTES);
fnvlist_add_nvlist(item,
ZPOOL_CONFIG_REMOVAL_STATS, nv);
fnvlist_free(nv);
free(vdev_name);
}
}
}
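/*
 * Add scrub/resilver scan statistics, error scrub statistics, and any
 * per-vdev rebuild statistics to 'item'.
 */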
static void
scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
nvlist_t *nvroot, nvlist_t *item)
{
pool_scan_stat_t *ps = NULL;
uint_t c;
nvlist_t *scan = fnvlist_alloc();
nvlist_t **child;
uint_t children;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
fnvlist_add_string(scan, "function",
pool_scan_func_str[ps->pss_func]);
fnvlist_add_string(scan, "state",
pool_scan_state_str[ps->pss_state]);
nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(scan, "examined", ps->pss_examined,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(scan, "processed", ps->pss_processed,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(scan, "errors", ps->pss_errors,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(scan, "scrub_pause",
ps->pss_pass_scrub_pause, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(scan, "scrub_spent_paused",
ps->pss_pass_scrub_spent_paused,
B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
nice_num_str_nvlist(scan, "issued_bytes_per_scan",
ps->pss_pass_issued, cb->cb_literal,
cb->cb_json_as_int, ZFS_NICENUM_BYTES);
nice_num_str_nvlist(scan, "issued", ps->pss_issued,
cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
ps->pss_error_scrub_start > ps->pss_start_time) {
fnvlist_add_string(scan, "err_scrub_func",
pool_scan_func_str[ps->pss_error_scrub_func]);
fnvlist_add_string(scan, "err_scrub_state",
pool_scan_state_str[ps->pss_error_scrub_state]);
nice_num_str_nvlist(scan, "err_scrub_start_time",
ps->pss_error_scrub_start,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(scan, "err_scrub_end_time",
ps->pss_error_scrub_end,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(scan, "err_scrub_examined",
ps->pss_error_scrub_examined,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(scan, "err_scrub_to_examine",
ps->pss_error_scrub_to_be_examined,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(scan, "err_scrub_pause",
ps->pss_pass_error_scrub_pause,
B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
vdev_rebuild_stat_t *vrs;
uint_t i;
char *name;
nvlist_t *nv;
nvlist_t *rebuild = fnvlist_alloc();
uint64_t st;
for (uint_t c = 0; c < children; c++) {
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
&i) == 0) {
if (vrs->vrs_state != VDEV_REBUILD_NONE) {
nv = fnvlist_alloc();
name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
fill_vdev_info(nv, zhp, name, B_FALSE,
cb->cb_json_as_int);
st = vrs->vrs_state;
fnvlist_add_string(nv, "state",
vdev_rebuild_state_str[st]);
nice_num_str_nvlist(nv, "start_time",
vrs->vrs_start_time, cb->cb_literal,
cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "end_time",
vrs->vrs_end_time, cb->cb_literal,
cb->cb_json_as_int,
ZFS_NICE_TIMESTAMP);
nice_num_str_nvlist(nv, "scan_time",
vrs->vrs_scan_time_ms * 1000000,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_TIME);
nice_num_str_nvlist(nv, "scanned",
vrs->vrs_bytes_scanned,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "issued",
vrs->vrs_bytes_issued,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "rebuilt",
vrs->vrs_bytes_rebuilt,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "to_scan",
vrs->vrs_bytes_est, cb->cb_literal,
cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "errors",
vrs->vrs_errors, cb->cb_literal,
cb->cb_json_as_int,
ZFS_NICENUM_1024);
nice_num_str_nvlist(nv, "pass_time",
vrs->vrs_pass_time_ms * 1000000,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_TIME);
nice_num_str_nvlist(nv, "pass_scanned",
vrs->vrs_pass_bytes_scanned,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "pass_issued",
vrs->vrs_pass_bytes_issued,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
nice_num_str_nvlist(nv, "pass_skipped",
vrs->vrs_pass_bytes_skipped,
cb->cb_literal, cb->cb_json_as_int,
ZFS_NICENUM_BYTES);
fnvlist_add_nvlist(rebuild, name, nv);
free(name);
}
}
}
if (!nvlist_empty(rebuild))
fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
fnvlist_free(rebuild);
}
if (!nvlist_empty(scan))
fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
fnvlist_free(scan);
}
/*
* Print the scan status.
*/
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
uint64_t rebuild_end_time = 0, resilver_end_time = 0;
boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
boolean_t have_errorscrub = B_FALSE;
boolean_t active_resilver = B_FALSE;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *ps = NULL;
uint_t c;
time_t scrub_start = 0, errorscrub_start = 0;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
if (ps->pss_func == POOL_SCAN_RESILVER) {
resilver_end_time = ps->pss_end_time;
active_resilver = (ps->pss_state == DSS_SCANNING);
}
have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
scrub_start = ps->pss_start_time;
if (c > offsetof(pool_scan_stat_t,
pss_pass_error_scrub_pause) / 8) {
have_errorscrub = (ps->pss_error_scrub_func ==
POOL_SCAN_ERRORSCRUB);
errorscrub_start = ps->pss_error_scrub_start;
}
}
boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
/* Always print the scrub status when available. */
if (have_scrub && scrub_start > errorscrub_start)
print_scan_scrub_resilver_status(ps);
else if (have_errorscrub && errorscrub_start >= scrub_start)
print_err_scrub_status(ps);
/*
* When there is an active resilver or rebuild print its status.
* Otherwise print the status of the last resilver or rebuild.
*/
if (active_resilver || (!active_rebuild && have_resilver &&
resilver_end_time && resilver_end_time > rebuild_end_time)) {
print_scan_scrub_resilver_status(ps);
} else if (active_rebuild || (!active_resilver && have_rebuild &&
rebuild_end_time && rebuild_end_time > resilver_end_time)) {
print_rebuild_status(zhp, nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_scan_warning(ps, pcs);
}
/*
* Print out detailed removal status.
*/
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
time_t start, end;
nvlist_t *config, *nvroot;
nvlist_t **child;
uint_t children;
char *vdev_name;
if (prs == NULL || prs->prs_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(prs->prs_removing_vdev < children);
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
/*
* Removal is finished or canceled.
*/
if (prs->prs_state == DSS_FINISHED) {
uint64_t minutes_taken = (end - start) / 60;
(void) printf(gettext("Removal of vdev %llu copied %s "
"in %lluh%um, completed on %s"),
(longlong_t)prs->prs_removing_vdev,
copied_buf,
(u_longlong_t)(minutes_taken / 60),
(uint_t)(minutes_taken % 60),
ctime((time_t *)&end));
} else if (prs->prs_state == DSS_CANCELED) {
(void) printf(gettext("Removal of %s canceled on %s"),
vdev_name, ctime(&end));
} else {
uint64_t copied, total, elapsed, rate, mins_left, hours_left;
double fraction_done;
assert(prs->prs_state == DSS_SCANNING);
/*
* Removal is in progress.
*/
(void) printf(gettext(
"Evacuation of %s in progress since %s"),
vdev_name, ctime(&start));
copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
total = prs->prs_to_copy;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - prs->prs_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
mins_left = ((total - copied) / rate) / 60;
hours_left = mins_left / 60;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
* do not print estimated time if hours_left is more than
* 30 days
*/
(void) printf(gettext(
"\t%s copied out of %s at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (hours_left < (30 * 24)) {
(void) printf(gettext(", %lluh%um to go\n"),
(u_longlong_t)hours_left, (uint_t)(mins_left % 60));
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vdev_name);
if (prs->prs_mapping_memory > 0) {
char mem_buf[7];
zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
(void) printf(gettext(
"\t%s memory used for removed device mappings\n"),
mem_buf);
}
}
/*
* Print out detailed raidz expansion status.
*/
static void
print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
{
char copied_buf[7];
if (pres == NULL || pres->pres_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
nvlist_t **child;
uint_t children;
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(pres->pres_expanding_vdev < children);
printf_color(ANSI_BOLD, gettext("expand: "));
time_t start = pres->pres_start_time;
time_t end = pres->pres_end_time;
char *vname =
zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
/*
* Expansion is finished or canceled.
*/
if (pres->pres_state == DSS_FINISHED) {
char time_buf[32];
secs_to_dhms(end - start, time_buf);
(void) printf(gettext("expanded %s-%u copied %s in %s, "
"on %s"), vname, (int)pres->pres_expanding_vdev,
copied_buf, time_buf, ctime((time_t *)&end));
} else {
char examined_buf[7], total_buf[7], rate_buf[7];
uint64_t copied, total, elapsed, rate, secs_left;
double fraction_done;
assert(pres->pres_state == DSS_SCANNING);
/*
* Expansion is in progress.
*/
(void) printf(gettext(
"expansion of %s-%u in progress since %s"),
vname, (int)pres->pres_expanding_vdev, ctime(&start));
copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
total = pres->pres_to_reflow;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - pres->pres_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
secs_left = (total - copied) / rate;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
 * do not print estimated time if secs_left is more than
* 30 days
*/
(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (pres->pres_waiting_for_resilver) {
(void) printf(gettext(", paused for resilver or "
"clear\n"));
} else if (secs_left < (30 * 24 * 3600)) {
char time_buf[32];
secs_to_dhms(secs_left, time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vname);
}
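/*
 * Print checkpoint status: when it was created and how much space it
 * consumes, or how much space remains to be discarded.
 */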
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
time_t start;
char space_buf[7];
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return;
(void) printf(gettext("checkpoint: "));
start = pcs->pcs_start_time;
zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
char *date = ctime(&start);
/*
* ctime() adds a newline at the end of the generated
* string, thus the weird format specifier and the
* strlen() call used to chop it off from the output.
*/
(void) printf(gettext("created %.*s, consumes %s\n"),
(int)(strlen(date) - 1), date, space_buf);
return;
}
assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
(void) printf(gettext("discarding, %s remaining.\n"),
space_buf);
}
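/*
 * Print the files with permanent errors recorded in the pool's persistent
 * error log.
 */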
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
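/*
 * Print the configuration and state of each hot spare.
 */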
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
free(name);
}
}
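/*
 * Print the configuration and state of each L2ARC cache device.
 */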
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2,
B_FALSE, NULL);
free(name);
}
}
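/*
 * Print the DDT entry count, on-disk/in-core sizes, and the dedup
 * histogram.
 */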
static void
print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
/* Extra space provided for literal display */
char dspace[32], mspace[32], cspace[32];
uint64_t cspace_prop;
enum zfs_nicenum_format format;
zprop_source_t src;
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if we have anything in the dedup
* table continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
/*
* Squash cached size into in-core size to handle race.
* Only include cached size if it is available.
*/
cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
(void) printf("DDT entries %llu, size %s on disk, %s in core",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
if (src != ZPROP_SRC_DEFAULT) {
(void) printf(", %s cached (%.02f%%)",
cspace,
(double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
}
(void) printf("\n");
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
#define ST_SIZE 4096
#define AC_SIZE 2048
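/*
 * Generate the human-readable 'status' and 'action' text for the given
 * pool status code and errata.
 */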
static void
print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
{
char status[ST_SIZE];
char action[AC_SIZE];
memset(status, 0, ST_SIZE);
memset(action, 0, AC_SIZE);
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
snprintf(action, AC_SIZE, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
snprintf(action, AC_SIZE, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
snprintf(action, AC_SIZE, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
snprintf(status, ST_SIZE, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
action, AC_SIZE);
break;
case ZPOOL_STATUS_FAILING_DEV:
snprintf(status, ST_SIZE, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
snprintf(action, AC_SIZE, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
snprintf(status, ST_SIZE, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
snprintf(action, AC_SIZE, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
snprintf(status, ST_SIZE, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
snprintf(action, AC_SIZE, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
snprintf(status, ST_SIZE, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
snprintf(status, ST_SIZE, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
snprintf(status, ST_SIZE, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
snprintf(action, AC_SIZE, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
snprintf(status, ST_SIZE, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
action, AC_SIZE);
break;
case ZPOOL_STATUS_VERSION_OLDER:
snprintf(status, ST_SIZE, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
snprintf(action, AC_SIZE, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
snprintf(status, ST_SIZE, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
snprintf(action, AC_SIZE, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
snprintf(status, ST_SIZE, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
snprintf(action, AC_SIZE, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
snprintf(status, ST_SIZE, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
snprintf(action, AC_SIZE, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
1024);
snprintf(action, AC_SIZE, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
snprintf(status, ST_SIZE, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
1024);
snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
snprintf(status, ST_SIZE, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
snprintf(status, ST_SIZE, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
snprintf(status, ST_SIZE, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool or run 'zpool clear' to resume the pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
snprintf(status, ST_SIZE, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
snprintf(action, AC_SIZE, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
snprintf(status, ST_SIZE, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
snprintf(action, AC_SIZE, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
snprintf(status, ST_SIZE, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
snprintf(action, AC_SIZE, gettext("Replace affected devices "
"with devices that support the\n\tconfigured block size, "
"or migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
snprintf(action, AC_SIZE, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
snprintf(action, AC_SIZE, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) strlcat(status, gettext("\tExisting encrypted "
"datasets contain an on-disk incompatibility\n\t "
"which needs to be corrected.\n"), ST_SIZE);
snprintf(action, AC_SIZE, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) strlcat(status, gettext("\tExisting encrypted "
"snapshots and bookmarks contain an on-disk\n\t"
"incompatibility. This may cause on-disk "
"corruption if they are used\n\twith "
"'zfs recv'.\n"), ST_SIZE);
snprintf(action, AC_SIZE, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
"the encrypted snapshots and bookmarks is required,"
" use\n\ta non-raw send to backup and restore them."
" Alternately, they may be\n\tremoved to resolve "
"the incompatibility.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors can't actually be generated, yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (status[0] != 0) {
if (cbp->cb_json)
fnvlist_add_string(item, "status", status);
else {
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, status);
}
}
if (action[0] != 0) {
if (cbp->cb_json)
fnvlist_add_string(item, "action", action);
else {
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, action);
}
}
}
static int
status_callback_json(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
const char *msgid;
char pool_guid[256];
char msgbuf[256];
uint64_t guid;
zpool_status_t reason;
zpool_errata_t errata;
uint_t c;
vdev_stat_t *vs;
nvlist_t *item, *d, *load_info, *vds;
item = d = NULL;
/* If dedup stats were requested, also fetch dedupcached. */
if (cbp->cb_dedup_stats > 1)
zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
reason = zpool_get_status(zhp, &msgid, &errata);
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
return (0);
}
d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
item = fnvlist_alloc();
vds = fnvlist_alloc();
fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
if (cbp->cb_json_pool_key_guid) {
guid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID);
snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
}
cbp->cb_count++;
print_status_reason(zhp, cbp, reason, errata, item);
if (msgid != NULL) {
snprintf(msgbuf, 256,
"https://openzfs.github.io/openzfs-docs/msg/%s",
msgid);
fnvlist_add_string(item, "msgid", msgid);
fnvlist_add_string(item, "moreinfo", msgbuf);
}
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
&load_info) == 0) {
fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
load_info);
}
scan_status_nvlist(zhp, cbp, nvroot, item);
removal_status_nvlist(zhp, cbp, nvroot, item);
checkpoint_status_nvlist(nvroot, cbp, item);
raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
if (cbp->cb_flat_vdevs) {
class_vdevs_nvlist(zhp, cbp, nvroot,
VDEV_ALLOC_BIAS_DEDUP, vds);
class_vdevs_nvlist(zhp, cbp, nvroot,
VDEV_ALLOC_BIAS_SPECIAL, vds);
class_vdevs_nvlist(zhp, cbp, nvroot,
VDEV_ALLOC_CLASS_LOGS, vds);
l2cache_nvlist(zhp, cbp, nvroot, vds);
spares_nvlist(zhp, cbp, nvroot, vds);
fnvlist_add_nvlist(item, "vdevs", vds);
fnvlist_free(vds);
} else {
fnvlist_add_nvlist(item, "vdevs", vds);
fnvlist_free(vds);
class_vdevs_nvlist(zhp, cbp, nvroot,
VDEV_ALLOC_BIAS_DEDUP, item);
class_vdevs_nvlist(zhp, cbp, nvroot,
VDEV_ALLOC_BIAS_SPECIAL, item);
class_vdevs_nvlist(zhp, cbp, nvroot,
VDEV_ALLOC_CLASS_LOGS, item);
l2cache_nvlist(zhp, cbp, nvroot, item);
spares_nvlist(zhp, cbp, nvroot, item);
}
dedup_stats_nvlist(zhp, cbp, item);
errors_nvlist(zhp, cbp, item);
}
if (cbp->cb_json_pool_key_guid) {
fnvlist_add_nvlist(d, pool_guid, item);
} else {
fnvlist_add_nvlist(d, zpool_get_name(zhp),
item);
}
fnvlist_free(item);
return (0);
}
/*
* Display a summary of pool status, such as:
*
* pool: tank
* state: DEGRADED
* status: One or more devices ...
* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config. If the '-e'
* option is specified, only unhealthy vdevs are shown.
*/
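/*
* When 'zpool status -x' is in effect, pools whose status is
* ZPOOL_STATUS_OK, VERSION_OLDER, FEAT_DISABLED, COMPATIBILITY_ERR or
* INCOMPATIBLE_FEAT are skipped from the full report; a pool named
* explicitly on the command line just prints "pool '<name>' is healthy"
* before the callback returns.
*/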
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
const char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
/* If dedup stats were requested, also fetch dedupcached. */
if (cbp->cb_dedup_stats > 1)
zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
printf(" ");
printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
fputc(' ', stdout);
printf_color(ANSI_BOLD, gettext("state: "));
printf_color(health_str_to_color(health), "%s", health);
fputc('\n', stdout);
print_status_reason(zhp, cbp, reason, errata, NULL);
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
print_scan_status(zhp, nvroot);
pool_removal_stat_t *prs = NULL;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
print_removal_status(zhp, prs);
pool_checkpoint_stat_t *pcs = NULL;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_status(pcs);
pool_raidz_expand_stat_t *pres = NULL;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
print_raidz_expand_status(zhp, pres);
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
color_start(ANSI_BOLD);
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
color_end();
if (cbp->cb_print_slow_ios) {
printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
if (cbp->cb_print_power) {
printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
}
if (cbp->cb_print_dio_verify) {
printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
}
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE, NULL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
(void) printf("\n");
if (nerr == 0) {
(void) printf(gettext(
"errors: No known data errors\n"));
} else if (!cbp->cb_verbose) {
color_start(ANSI_RED);
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
color_end();
} else {
print_error_log(zhp);
}
}
if (cbp->cb_dedup_stats)
print_dedup_stats(zhp, config, cbp->cb_literal);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
/*
* zpool status [-c [script1,script2,...]] [-dDegiLpPstvx] [--power] ...
* [-T d|u] [pool] [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -d Display Direct I/O write verify errors
* -D Display dedup status (undocumented)
* -e Display only unhealthy vdevs
* -g Display guid for individual vdev name.
* -i Display vdev initialization status.
* -L Follow links when resolving vdev path name.
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format
* -v Display complete error logs
* -x Display only pools with potential problems
* -j Display output in JSON format
* --power Display vdev enclosure slot power status
* --json-int Display numbers in integer format instead of strings
* --json-flat-vdevs Display vdevs in flat hierarchy
* --json-pool-key-guid Use pool GUID as key for pool objects
*
* Describes the health status of all pools or some subset.
*/
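/*
* Example invocations (the pool name 'tank' is illustrative):
*
*   zpool status -x tank          report 'tank' only if it has problems
*   zpool status -j --json-int    emit machine-readable JSON output
*   zpool status -T d 5           repeat every 5 seconds with a date stamp
*/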
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
nvlist_t *data;
char *cmd = NULL;
struct option long_options[] = {
{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
{"json-flat-vdevs", no_argument, NULL,
ZPOOL_OPTION_JSON_FLAT_VDEVS},
{"json-pool-key-guid", no_argument, NULL,
ZPOOL_OPTION_POOL_KEY_GUID},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
NULL)) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
case 'd':
cb.cb_print_dio_verify = B_TRUE;
break;
case 'D':
if (++cb.cb_dedup_stats > 2)
cb.cb_dedup_stats = 2;
break;
case 'e':
cb.cb_print_unhealthy = B_TRUE;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'i':
cb.cb_print_vdev_init = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 's':
cb.cb_print_slow_ios = B_TRUE;
break;
case 't':
cb.cb_print_vdev_trim = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'j':
cb.cb_json = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case ZPOOL_OPTION_POWER:
cb.cb_print_power = B_TRUE;
break;
case ZPOOL_OPTION_JSON_FLAT_VDEVS:
cb.cb_flat_vdevs = B_TRUE;
break;
case ZPOOL_OPTION_JSON_NUMS_AS_INT:
cb.cb_json_as_int = B_TRUE;
cb.cb_literal = B_TRUE;
break;
case ZPOOL_OPTION_POOL_KEY_GUID:
cb.cb_json_pool_key_guid = B_TRUE;
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
if (cb.cb_flat_vdevs && !cb.cb_json) {
fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
" '-j' option\n"));
usage(B_FALSE);
}
if (cb.cb_json_as_int && !cb.cb_json) {
(void) fprintf(stderr, gettext("'--json-int' only works with"
" '-j' option\n"));
usage(B_FALSE);
}
if (!cb.cb_json && cb.cb_json_pool_key_guid) {
(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
" works with '-j' option\n"));
usage(B_FALSE);
}
for (;;) {
if (cb.cb_json) {
cb.cb_jsobj = zpool_json_schema(0, 1);
data = fnvlist_alloc();
fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
fnvlist_free(data);
}
if (timestamp_fmt != NODATE) {
if (cb.cb_json) {
if (cb.cb_json_as_int) {
fnvlist_add_uint64(cb.cb_jsobj, "time",
time(NULL));
} else {
char ts[128];
get_timestamp(timestamp_fmt, ts, 128);
fnvlist_add_string(cb.cb_jsobj, "time",
ts);
}
} else
print_timestamp(timestamp_fmt);
}
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
if (cb.cb_json) {
ret = for_each_pool(argc, argv, B_TRUE, NULL,
ZFS_TYPE_POOL, cb.cb_literal,
status_callback_json, &cb);
} else {
ret = for_each_pool(argc, argv, B_TRUE, NULL,
ZFS_TYPE_POOL, cb.cb_literal,
status_callback, &cb);
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (cb.cb_json) {
if (ret == 0)
zcmd_print_json(cb.cb_jsobj);
else
nvlist_free(cb.cb_jsobj);
} else {
if (argc == 0 && cb.cb_count == 0) {
(void) fprintf(stderr, "%s",
gettext("no pools available\n"));
} else if (cb.cb_explain && cb.cb_first &&
cb.cb_allpools) {
(void) printf("%s",
gettext("all pools are healthy\n"));
}
}
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fflush(stdout);
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
(void) fprintf(stderr, gettext("Upgrade not performed because "
"'compatibility' property set to '"
ZPOOL_COMPAT_LEGACY "'.\n"));
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t modified_pool = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
modified_pool = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
modified_pool = B_TRUE;
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"
"Note that setting a pool's 'compatibility' "
"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
"inhibit upgrades.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "
"used to inhibit\nfeature "
"upgrades.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t modified_pool = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
modified_pool = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
modified_pool = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported and requested features enabled.\n"),
zpool_get_name(zhp));
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display downrev'd ZFS pools available for upgrade.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
*/
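/*
* Example invocations (the pool name 'tank' is illustrative):
*
*   zpool upgrade            list pools that can be upgraded
*   zpool upgrade -v         show supported features and legacy versions
*   zpool upgrade -a         upgrade every imported pool
*   zpool upgrade tank       upgrade only 'tank'
*/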
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf("%s", gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
if (!fi->fi_zfs_mod_supported)
continue;
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported and "
"requested features enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported and requested features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, upgrade_one, &cb);
}
return (ret);
}
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
nvlist_t **records;
uint_t numrecords;
int i;
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[64] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
}
if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
ZPOOL_HIST_ELAPSED_NS);
(void) snprintf(tbuf + strlen(tbuf),
sizeof (tbuf) - strlen(tbuf),
" (%lldms)", (long long)elapsed_ns / 1000 / 1000);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
(void) printf(" output nvlist omitted; "
"original size: %lldKB\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_OUTPUT_SIZE) / 1024);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
}
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
int ret;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
uint64_t off = 0;
boolean_t eof = B_FALSE;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
while (!eof) {
if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
return (ret);
print_history_records(nvhis, cb);
nvlist_free(nvhis);
}
(void) printf("\n");
return (ret);
}
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
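/*
* Example invocations (the pool name 'tank' is illustrative):
*
*   zpool history tank       commands that modified 'tank'
*   zpool history -il tank   long format, including internal events
*/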
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, get_history_one, &cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32];
const char *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
static char flagstr[256];
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
const char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
zfs_valstr_zio_stage(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
zfs_valstr_zio_priority(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else {
printf(gettext("0x%x"), i32);
}
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
zfs_valstr_zio_flag(i64, flagstr,
sizeof (flagstr));
printf(gettext("0x%llx [%s]"),
(u_longlong_t)i64, flagstr);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
const char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_DONTCARE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
const char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
static int
zpool_do_events_clear(void)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays event logs generated by ZFS.
*/
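/*
* Example invocations (the pool name 'tank' is illustrative):
*
*   zpool events             print pending events and exit
*   zpool events -vf tank    follow events for 'tank' with full payloads
*   zpool events -c          clear the event buffer
*/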
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear();
else
ret = zpool_do_events_next(&opts);
return (ret);
}
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
nvlist_t *props, *item, *d;
props = item = d = NULL;
if (cbp->cb_json) {
d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
if (d == NULL) {
fprintf(stderr, "vdevs obj not found.\n");
exit(1);
}
props = fnvlist_alloc();
}
for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
pl = pl->pl_next) {
char *prop_name;
/*
* If the first property is pool name, it is a special
* placeholder that we can skip. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL) {
prop_name = pl->pl_user_prop;
} else {
prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
}
if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
prop_name, value, sizeof (value), &srctype,
cbp->cb_literal) == 0) {
zprop_collect_property(vdevname, cbp, prop_name,
value, srctype, NULL, NULL, props);
}
}
if (cbp->cb_json) {
if (!nvlist_empty(props)) {
item = fnvlist_alloc();
fill_vdev_info(item, zhp, vdevname, B_TRUE,
cbp->cb_json_as_int);
fnvlist_add_nvlist(item, "properties", props);
fnvlist_add_nvlist(d, vdevname, item);
fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
fnvlist_free(item);
}
fnvlist_free(props);
}
return (0);
}
static int
get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
{
zpool_handle_t *zhp = zhp_data;
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char *vdevname;
const char *type;
int ret;
/*
* zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
* pool name for display purposes, which is not desired. Fall back to
* zpool_vdev_name() when not dealing with the root vdev.
*/
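/*
* For example (pool name illustrative), 'zpool get all tank all-vdevs'
* walks every vdev through this callback and reports the root vdev under
* the fixed name "root-0" instead of the pool name.
*/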
type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
vdevname = strdup("root-0");
else
vdevname = zpool_vdev_name(g_zfs, zhp, nv,
cbp->cb_vdevs.cb_name_flags);
(void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
ret = get_callback_vdev(zhp, vdevname, data);
free(vdevname);
return (ret);
}
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
zprop_list_t *pl;
int vid;
int err = 0;
nvlist_t *props, *item, *d;
props = item = d = NULL;
if (cbp->cb_type == ZFS_TYPE_VDEV) {
if (cbp->cb_json) {
nvlist_t *pool = fnvlist_alloc();
fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
fnvlist_free(pool);
}
if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
for_each_vdev(zhp, get_callback_vdev_cb, data);
} else {
/* Adjust column widths for vdev properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
vdev_expand_proplist(zhp,
cbp->cb_vdevs.cb_names[vid],
&cbp->cb_proplist);
}
/* Display the properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
get_callback_vdev(zhp,
cbp->cb_vdevs.cb_names[vid], data);
}
}
} else {
assert(cbp->cb_type == ZFS_TYPE_POOL);
if (cbp->cb_json) {
d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
if (d == NULL) {
fprintf(stderr, "pools obj not found.\n");
exit(1);
}
props = fnvlist_alloc();
}
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also
* skip over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
zfs_prop_user(pl->pl_user_prop)) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_get_userprop(zhp, pl->pl_user_prop,
value, sizeof (value), &srctype) != 0)
continue;
err = zprop_collect_property(
zpool_get_name(zhp), cbp, pl->pl_user_prop,
value, srctype, NULL, NULL, props);
} else if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp,
pl->pl_user_prop, value,
sizeof (value)) == 0) {
err = zprop_collect_property(
zpool_get_name(zhp), cbp,
pl->pl_user_prop, value, srctype,
NULL, NULL, props);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype,
cbp->cb_literal) != 0)
continue;
err = zprop_collect_property(
zpool_get_name(zhp), cbp,
zpool_prop_to_name(pl->pl_prop),
value, srctype, NULL, NULL, props);
}
if (err != 0)
return (err);
}
if (cbp->cb_json) {
if (!nvlist_empty(props)) {
item = fnvlist_alloc();
fill_pool_info(item, zhp, B_TRUE,
cbp->cb_json_as_int);
fnvlist_add_nvlist(item, "properties", props);
if (cbp->cb_json_pool_key_guid) {
char buf[256];
uint64_t guid = fnvlist_lookup_uint64(
zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_POOL_GUID);
snprintf(buf, 256, "%llu",
(u_longlong_t)guid);
fnvlist_add_nvlist(d, buf, item);
} else {
const char *name = zpool_get_name(zhp);
fnvlist_add_nvlist(d, name, item);
}
fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
fnvlist_free(item);
}
fnvlist_free(props);
}
}
return (0);
}
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
* -j Display output in JSON format.
* --json-int Display numbers as integers instead of strings.
* --json-pool-key-guid Set pool GUID as key for pool objects.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
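/*
* Example invocations (the pool name 'tank' is illustrative):
*
*   zpool get all tank                       every property of 'tank'
*   zpool get -Hp size,free tank             scripted, parsable values
*   zpool get -j --json-pool-key-guid all    JSON output keyed by pool GUID
*/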
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *propstr = NULL;
char *vdev = NULL;
nvlist_t *data = NULL;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
current_prop_type = cb.cb_type;
struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
{"json-pool-key-guid", no_argument, NULL,
ZPOOL_OPTION_POOL_KEY_GUID},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
NULL)) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'j':
cb.cb_json = B_TRUE;
cb.cb_jsobj = zpool_json_schema(0, 1);
data = fnvlist_alloc();
break;
case ZPOOL_OPTION_POOL_KEY_GUID:
cb.cb_json_pool_key_guid = B_TRUE;
break;
case ZPOOL_OPTION_JSON_NUMS_AS_INT:
cb.cb_json_as_int = B_TRUE;
cb.cb_literal = B_TRUE;
break;
case 'o':
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] =
{ "name", "property", "value", "source",
"all" };
static const zfs_get_column_t col_cols[] =
{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
GET_COL_SOURCE };
if (i == ZFS_GET_NCOLS - 1) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
if (strcmp(tok, col_opts[c]) == 0)
goto found;
(void) fprintf(stderr,
gettext("invalid column name '%s'\n"), tok);
usage(B_FALSE);
found:
if (c >= 4) {
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
memcpy(cb.cb_columns, col_cols,
sizeof (col_cols));
i = ZFS_GET_NCOLS - 1;
} else
cb.cb_columns[i++] = col_cols[c];
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!cb.cb_json && cb.cb_json_as_int) {
(void) fprintf(stderr, gettext("'--json-int' only works with"
" '-j' option\n"));
usage(B_FALSE);
}
if (!cb.cb_json && cb.cb_json_pool_key_guid) {
(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
" works with '-j' option\n"));
usage(B_FALSE);
}
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
/* Properties list is needed later by zprop_get_list() */
propstr = argv[0];
argc--;
argv++;
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
(argc == 2 && strcmp(argv[1], "root") == 0) ||
are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);
/* ... and the rest are vdev names */
cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = argc - 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 1; /* One pool to process */
} else {
if (cb.cb_json) {
nvlist_free(cb.cb_jsobj);
nvlist_free(data);
}
fprintf(stderr, gettext("Expected a list of vdevs in"
" \"%s\", but got:\n"), argv[0]);
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
if (cb.cb_json) {
nvlist_free(cb.cb_jsobj);
nvlist_free(data);
}
/*
* The first arg isn't the name of a valid pool.
*/
fprintf(stderr, gettext("Cannot get properties of %s: "
"no such pool available.\n"), argv[0]);
return (1);
}
if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
cb.cb_type) != 0) {
/* Use correct list of valid properties (pool or vdev) */
current_prop_type = cb.cb_type;
usage(B_FALSE);
}
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
if (cb.cb_json) {
if (cb.cb_type == ZFS_TYPE_VDEV)
fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
else
fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
fnvlist_free(data);
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
cb.cb_literal, get_callback, &cb);
if (ret == 0 && cb.cb_json)
zcmd_print_json(cb.cb_jsobj);
else if (ret != 0 && cb.cb_json)
nvlist_free(cb.cb_jsobj);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
if (vdev != NULL)
free(vdev);
return (ret);
}
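/*
 * Illustrative sketch, not part of this change: the same libzfs calls that
 * get_callback() drives for "zpool get" can be used directly to read a single
 * property from a single pool.  The helper name and the choice of
 * ZPOOL_PROP_CAPACITY are examples only; g_zfs is assumed to have been set up
 * by libzfs_init(), as in main() below.
 */
static int __attribute__((unused))
example_print_capacity(const char *poolname)
{
	zpool_handle_t *zhp;
	char value[ZFS_MAXPROPLEN];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);
	/* B_FALSE asks for the human-readable form; B_TRUE for exact values */
	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, value,
	    sizeof (value), NULL, B_FALSE) == 0)
		(void) printf("%s capacity: %s\n", poolname, value);
	zpool_close(zhp);
	return (0);
}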
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
boolean_t cb_any_successful;
} set_cbdata_t;
static int
set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
{
int error;
/* Check if we have out-of-bounds features */
if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(cb->cb_value, features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
nvlist_t *enabled = zpool_get_features(zhp);
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
if (nvlist_exists(enabled, fguid) && !features[i])
break;
}
if (i < SPA_FEATURES)
(void) fprintf(stderr, gettext("Warning: one or "
"more features already enabled on pool '%s'\n"
"are not present in this compatibility set.\n"),
zpool_get_name(zhp));
}
/* if we're setting a feature, check it's in compatibility set */
if (zpool_prop_feature(cb->cb_propname) &&
strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
char *fname = strchr(cb->cb_propname, '@') + 1;
spa_feature_t f;
if (zfeature_lookup_name(fname, &f) == 0) {
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(compat, features) !=
ZPOOL_COMPATIBILITY_OK) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"because the pool's 'compatibility' "
"property cannot be parsed.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
if (!features[f]) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"as it is not specified in this pool's "
"current compatibility set.\n"
"Consider setting 'compatibility' to a "
"less restrictive set, or to 'off'.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
}
}
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
return (error);
}
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
if (cb->cb_type == ZFS_TYPE_VDEV) {
error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
cb->cb_propname, cb->cb_value);
} else {
assert(cb->cb_type == ZFS_TYPE_POOL);
error = set_pool_callback(zhp, cb);
}
cb->cb_any_successful = !error;
return (error);
}
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
char *vdev = NULL;
current_prop_type = ZFS_TYPE_POOL;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 4) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
argc -= 2;
argv += 2;
/* argv[0] is pool name */
if (!is_pool(argv[0])) {
(void) fprintf(stderr,
gettext("cannot open '%s': is not a pool\n"), argv[0]);
return (EINVAL);
}
/* argv[1], when supplied, is vdev name */
if (argc == 2) {
if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);
if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
(void) fprintf(stderr, gettext(
"cannot find '%s' in '%s': device not in pool\n"),
vdev, argv[0]);
free(vdev);
return (EINVAL);
}
cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = 1;
cb.cb_type = ZFS_TYPE_VDEV;
}
error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, set_callback, &cb);
if (vdev != NULL)
free(vdev);
return (error);
}
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
uint64_t bytes_remaining;
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
assert(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (activity == ZPOOL_WAIT_INITIALIZE &&
vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
bytes_remaining = vs->vs_initialize_bytes_est -
vs->vs_initialize_bytes_done;
else if (activity == ZPOOL_WAIT_TRIM &&
vs->vs_trim_state == VDEV_TRIM_ACTIVE)
bytes_remaining = vs->vs_trim_bytes_est -
vs->vs_trim_bytes_done;
else
bytes_remaining = 0;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++)
bytes_remaining += vdev_activity_remaining(child[c], activity);
return (bytes_remaining);
}
/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
uint64_t bytes_remaining = 0;
nvlist_t **child;
uint_t children;
int error;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
error = nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
if (error == 0) {
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
bytes_remaining += (vrs->vrs_bytes_est -
vrs->vrs_bytes_rebuilt);
}
}
}
return (bytes_remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
const char *vdev_type;
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++) {
if (vdev_any_spare_replacing(child[c]))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct wait_data {
char *wd_poolname;
boolean_t wd_scripted;
boolean_t wd_exact;
boolean_t wd_headers_once;
boolean_t wd_should_exit;
/* Which activities to wait for */
boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
float wd_interval;
pthread_cond_t wd_cv;
pthread_mutex_t wd_mutex;
} wait_data_t;
/*
* Print to stdout a single line containing one column for each activity that
* we are waiting for, specifying how many bytes of work are left for that
* activity.
*/
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
nvlist_t *config, *nvroot;
uint_t c;
int i;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *pss = NULL;
pool_removal_stat_t *prs = NULL;
pool_raidz_expand_stat_t *pres = NULL;
const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
/* Calculate the width of each column */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
/*
* Make sure we have enough space in the col for pretty-printed
* numbers and for the column header, and then leave a couple
* spaces between cols for readability.
*/
col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
}
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
/* Print header if appropriate */
int term_height = terminal_height();
boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
row % (term_height-1) == 0);
if (!wd->wd_scripted && (row == 0 || reprint_header)) {
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
if (wd->wd_enabled[i])
(void) printf("%*s", col_widths[i], headers[i]);
}
(void) fputc('\n', stdout);
}
/* Bytes of work remaining in each activity */
int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
bytes_rem[ZPOOL_WAIT_FREE] =
zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
if (prs != NULL && prs->prs_state == DSS_SCANNING)
bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
prs->prs_copied;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
if (pss != NULL && pss->pss_state == DSS_SCANNING &&
pss->pss_pass_scrub_pause == 0) {
int64_t rem = pss->pss_to_examine - pss->pss_issued;
if (pss->pss_func == POOL_SCAN_SCRUB)
bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
else
bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
} else if (check_rebuilding(nvroot, NULL)) {
bytes_rem[ZPOOL_WAIT_RESILVER] =
vdev_activity_top_remaining(nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
if (pres != NULL && pres->pres_state == DSS_SCANNING) {
int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
}
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
bytes_rem[ZPOOL_WAIT_TRIM] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
/*
* A replace finishes after resilvering finishes, so the amount of work
* left for a replace is the same as for resilvering.
*
* It isn't quite correct to say that if we have any 'spare' or
* 'replacing' vdevs and a resilver is happening, then a replace is in
* progress, like we do here. When a hot spare is used, the faulted vdev
* is not removed after the hot spare is resilvered, so the parent 'spare'
* vdev is not removed either. So we could have a 'spare' vdev, but be
* resilvering for a different reason. However, we use it as a heuristic
* because we don't have access to the DTLs, which could tell us whether
* or not we have really finished resilvering a hot spare.
*/
if (vdev_any_spare_replacing(nvroot))
bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
char buf[64];
if (!wd->wd_enabled[i])
continue;
if (wd->wd_exact) {
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
bytes_rem[i]);
} else {
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
}
if (wd->wd_scripted)
(void) printf(i == 0 ? "%s" : "\t%s", buf);
else
(void) printf(" %*s", col_widths[i] - 1, buf);
}
(void) printf("\n");
(void) fflush(stdout);
}
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
int ret = 0;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
(void) fprintf(stderr, gettext("pthread_cond_timedwait "
"failed: %s\n"), strerror(ret));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
int c, i;
unsigned long count;
pthread_t status_thr;
int error = 0;
zpool_handle_t *zhp;
wait_data_t wd;
wd.wd_scripted = B_FALSE;
wd.wd_exact = B_FALSE;
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
pthread_mutex_init(&wd.wd_mutex, NULL);
pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
wd.wd_enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
switch (c) {
case 'H':
wd.wd_scripted = B_TRUE;
break;
case 'n':
wd.wd_headers_once = B_TRUE;
break;
case 'p':
wd.wd_exact = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 't':
/* Reset activities array */
memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] = {
"discard", "free", "initialize", "replace",
"remove", "resilver", "scrub", "trim",
"raidz_expand" };
for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
if (strcmp(tok, col_opts[i]) == 0) {
wd.wd_enabled[i] = B_TRUE;
goto found;
}
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"), tok);
usage(B_FALSE);
found:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &wd.wd_interval, &count);
if (count != 0) {
/* This subcmd only accepts an interval, not a count */
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (wd.wd_interval != 0)
verbose = B_TRUE;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
wd.wd_poolname = argv[0];
if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
return (1);
if (verbose) {
/*
* We use a separate thread for printing status updates because
* the main thread will call lzc_wait(), which blocks as long
* as an activity is in progress, which can be a long time.
*/
if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
!= 0) {
(void) fprintf(stderr, gettext("failed to create status"
"thread: %s\n"), strerror(errno));
zpool_close(zhp);
return (1);
}
}
/*
* Loop over all activities that we are supposed to wait for until none
* of them are in progress. Note that this means we can end up waiting
* for more activities to complete than just those that were in progress
* when we began waiting; if an activity we are interested in begins
* while we are waiting for another activity, we will wait for both to
* complete before exiting.
*/
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!wd.wd_enabled[i])
continue;
error = zpool_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zpool_close(zhp);
if (verbose) {
uintptr_t status;
pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
pthread_cond_signal(&wd.wd_cv);
pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
pthread_mutex_destroy(&wd.wd_mutex);
pthread_cond_destroy(&wd.wd_cv);
return (error);
}
/*
* zpool ddtprune -d|-p <amount> <pool>
*
* -d <days> Prune entries <days> old and older
* -p <percent> Prune <percent> percent of entries
*
* Prune single-reference entries from the DDT to satisfy the specified amount.
*/
int
zpool_do_ddt_prune(int argc, char **argv)
{
zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
uint64_t amount = 0;
zpool_handle_t *zhp;
char *endptr;
int c;
while ((c = getopt(argc, argv, "d:p:")) != -1) {
switch (c) {
case 'd':
if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with -p option\n"));
usage(B_FALSE);
}
errno = 0;
amount = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0' || amount == 0) {
(void) fprintf(stderr,
gettext("invalid days value\n"));
usage(B_FALSE);
}
amount *= 86400; /* convert days to seconds */
unit = ZPOOL_DDT_PRUNE_AGE;
break;
case 'p':
if (unit == ZPOOL_DDT_PRUNE_AGE) {
(void) fprintf(stderr, gettext("-p cannot be "
"combined with -d option\n"));
usage(B_FALSE);
}
errno = 0;
amount = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0' ||
amount == 0 || amount > 100) {
(void) fprintf(stderr,
gettext("invalid percentage value\n"));
usage(B_FALSE);
}
unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (unit == ZPOOL_DDT_PRUNE_NONE) {
(void) fprintf(stderr,
gettext("missing amount option (-d|-p <value>)\n"));
usage(B_FALSE);
} else if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
} else if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zpool_open(g_zfs, argv[0]);
if (zhp == NULL)
return (-1);
int error = zpool_ddt_prune(zhp, unit, amount);
zpool_close(zhp);
return (error);
}
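/*
 * Illustrative sketch, not part of this change: the -d path above reduces to
 * a single libzfs call.  Pruning single-reference DDT entries that are 30
 * days old or older on a pool named "tank" (pool name and age are examples
 * only, and g_zfs is assumed to be initialized) looks roughly like this:
 */
static int __attribute__((unused))
example_ddt_prune_30_days(void)
{
	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");

	if (zhp == NULL)
		return (-1);
	/* Same unit/amount pair that zpool_do_ddt_prune() builds for -d 30 */
	int error = zpool_ddt_prune(zhp, ZPOOL_DDT_PRUNE_AGE,
	    30ULL * 86400);	/* days converted to seconds */
	zpool_close(zhp);
	return (error);
}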
static int
find_command_idx(const char *command, int *idx)
{
for (int i = 0; i < NCOMMAND; ++i) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
/*
* Display version message
*/
static int
zpool_do_version(int argc, char **argv)
{
int c;
nvlist_t *jsobj = NULL, *zfs_ver = NULL;
boolean_t json = B_FALSE;
struct option long_options[] = {
{"json", no_argument, NULL, 'j'},
};
while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
switch (c) {
case 'j':
json = B_TRUE;
jsobj = zpool_json_schema(0, 1);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
if (argc != 0) {
(void) fprintf(stderr, "too many arguments\n");
usage(B_FALSE);
}
if (json) {
zfs_ver = zfs_version_nvlist();
if (zfs_ver) {
fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
zcmd_print_json(jsobj);
fnvlist_free(zfs_ver);
return (0);
} else
return (-1);
} else
return (zfs_version_print() != 0);
}
/* Display documentation */
static int
zpool_do_help(int argc, char **argv)
{
char page[MAXNAMELEN];
if (argc < 3 || strcmp(argv[2], "zpool") == 0)
strcpy(page, "zpool");
else if (strcmp(argv[2], "concepts") == 0 ||
strcmp(argv[2], "props") == 0)
snprintf(page, sizeof (page), "zpool%s", argv[2]);
else
snprintf(page, sizeof (page), "zpool-%s", argv[2]);
execlp("man", "man", page, NULL);
fprintf(stderr, "couldn't run man program: %s", strerror(errno));
return (-1);
}
/*
* Do zpool_load_compat() and print error message on failure
*/
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
char report[1024];
zpool_compat_status_t ret;
ret = zpool_load_compat(compat, list, report, 1024);
switch (ret) {
case ZPOOL_COMPATIBILITY_OK:
break;
case ZPOOL_COMPATIBILITY_NOFILES:
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
(void) fprintf(stderr, "Error: %s\n", report);
break;
case ZPOOL_COMPATIBILITY_WARNTOKEN:
(void) fprintf(stderr, "Warning: %s\n", report);
ret = ZPOOL_COMPATIBILITY_OK;
break;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zfs_version_print() != 0);
/*
* Special case 'help'
*/
if (strcmp(cmdname, "help") == 0)
return (zpool_do_help(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/sys/contrib/openzfs/config/kernel-automount.m4 b/sys/contrib/openzfs/config/kernel-automount.m4
index 52f1931b748e..b5f1392d0fcd 100644
--- a/sys/contrib/openzfs/config/kernel-automount.m4
+++ b/sys/contrib/openzfs/config/kernel-automount.m4
@@ -1,25 +1,62 @@
dnl #
dnl # 2.6.37 API change
dnl # The dops->d_automount() dentry operation was added as a clean
dnl # solution to handling automounts. Prior to this, cifs/nfs clients
dnl # which required automount support would abuse the follow_link()
dnl # operation on directories for this purpose.
dnl #
-AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [
+AC_DEFUN([ZFS_AC_KERNEL_SRC_D_AUTOMOUNT], [
ZFS_LINUX_TEST_SRC([dentry_operations_d_automount], [
#include <linux/dcache.h>
static struct vfsmount *d_automount(struct path *p) { return NULL; }
struct dentry_operations dops __attribute__ ((unused)) = {
.d_automount = d_automount,
};
])
])
-AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [
+AC_DEFUN([ZFS_AC_KERNEL_D_AUTOMOUNT], [
AC_MSG_CHECKING([whether dops->d_automount() exists])
ZFS_LINUX_TEST_RESULT([dentry_operations_d_automount], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([dops->d_automount()])
])
])
+
+dnl #
+dnl # 6.14 API change
+dnl # dops->d_revalidate now has four args.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_D_REVALIDATE_4ARGS], [
+ ZFS_LINUX_TEST_SRC([dentry_operations_d_revalidate_4args], [
+ #include <linux/dcache.h>
+ static int d_revalidate(struct inode *dir,
+ const struct qstr *name, struct dentry *dentry,
+ unsigned int fl) { return 0; }
+ struct dentry_operations dops __attribute__ ((unused)) = {
+ .d_revalidate = d_revalidate,
+ };
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_D_REVALIDATE_4ARGS], [
+ AC_MSG_CHECKING([whether dops->d_revalidate() takes 4 args])
+ ZFS_LINUX_TEST_RESULT([dentry_operations_d_revalidate_4args], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_D_REVALIDATE_4ARGS, 1,
+ [dops->d_revalidate() takes 4 args])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [
+ ZFS_AC_KERNEL_SRC_D_AUTOMOUNT
+ ZFS_AC_KERNEL_SRC_D_REVALIDATE_4ARGS
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [
+ ZFS_AC_KERNEL_D_AUTOMOUNT
+ ZFS_AC_KERNEL_D_REVALIDATE_4ARGS
+])
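The HAVE_D_REVALIDATE_4ARGS result produced above is normally consumed on the
C side with a prototype switch; a minimal sketch, with the function and
variable names chosen here purely for illustration:

#include <linux/dcache.h>

#ifdef HAVE_D_REVALIDATE_4ARGS
static int
example_revalidate(struct inode *dir, const struct qstr *name,
    struct dentry *dentry, unsigned int flags)
#else
static int
example_revalidate(struct dentry *dentry, unsigned int flags)
#endif
{
	/* Shared body; only the prototype differs between kernel versions. */
	return (1);
}

static const struct dentry_operations example_dops __attribute__ ((unused)) = {
	.d_revalidate	= example_revalidate,
};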
diff --git a/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4 b/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
index a223343030db..dc4e11cef2e9 100644
--- a/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
+++ b/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
@@ -1,93 +1,120 @@
dnl #
dnl # Check for available iov_iter functionality.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_IOV_ITER], [
ZFS_LINUX_TEST_SRC([fault_in_iov_iter_readable], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
size_t size = 512;
int error __attribute__ ((unused));
error = fault_in_iov_iter_readable(&iter, size);
])
ZFS_LINUX_TEST_SRC([iov_iter_type], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
__attribute__((unused)) enum iter_type i = iov_iter_type(&iter);
])
+ ZFS_LINUX_TEST_SRC([iov_iter_get_pages2], [
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ struct page **pages = NULL;
+ size_t maxsize = 4096;
+ unsigned maxpages = 1;
+ size_t start;
+ size_t ret __attribute__ ((unused));
+
+ ret = iov_iter_get_pages2(&iter, pages, maxsize, maxpages,
+ &start);
+ ])
+
ZFS_LINUX_TEST_SRC([iter_is_ubuf], [
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
bool ret __attribute__((unused));
ret = iter_is_ubuf(&iter);
])
ZFS_LINUX_TEST_SRC([iter_iov], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
__attribute__((unused)) const struct iovec *iov = iter_iov(&iter);
])
])
AC_DEFUN([ZFS_AC_KERNEL_VFS_IOV_ITER], [
AC_MSG_CHECKING([whether fault_in_iov_iter_readable() is available])
ZFS_LINUX_TEST_RESULT([fault_in_iov_iter_readable], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FAULT_IN_IOV_ITER_READABLE, 1,
[fault_in_iov_iter_readable() is available])
],[
AC_MSG_RESULT(no)
])
dnl #
dnl # This checks for iov_iter_type() in linux/uio.h. It is not
dnl # required, however, and the module will be compiled without it,
dnl # using direct access to the member attribute
dnl #
AC_MSG_CHECKING([whether iov_iter_type() is available])
ZFS_LINUX_TEST_RESULT([iov_iter_type], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_TYPE, 1,
[iov_iter_type() is available])
],[
AC_MSG_RESULT(no)
])
+
+ dnl #
+ dnl # Kernel 6.0 changed iov_iter_get_pages() to iov_iter_get_pages2().
+ dnl #
+ AC_MSG_CHECKING([whether iov_iter_get_pages2() is available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_get_pages2], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_GET_PAGES2, 1,
+ [iov_iter_get_pages2() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
dnl #
dnl # Kernel 6.0 introduced the ITER_UBUF iov_iter type. iter_is_ubuf()
dnl # was also added to determine if the iov_iter is an ITER_UBUF.
dnl #
AC_MSG_CHECKING([whether iter_is_ubuf() is available])
ZFS_LINUX_TEST_RESULT([iter_is_ubuf], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ITER_IS_UBUF, 1, [iter_is_ubuf() is available])
],[
AC_MSG_RESULT(no)
])
dnl #
dnl # Kernel 6.5 introduces the iter_iov() function that returns the
dnl # __iov member of an iov_iter*. The iov member was renamed to __iov
dnl # and is now intended to be accessed via this helper function.
dnl #
AC_MSG_CHECKING([whether iter_iov() is available])
ZFS_LINUX_TEST_RESULT([iter_iov], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ITER_IOV, 1,
[iter_iov() is available])
],[
AC_MSG_RESULT(no)
])
])
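The HAVE_ITER_IOV define produced above is typically consumed through a small
compatibility accessor, so the rest of the code never touches the renamed
__iov member directly.  A minimal sketch, with the macro name chosen here for
illustration only:

#include <linux/uio.h>

#ifdef HAVE_ITER_IOV
#define	example_iter_iov(it)	iter_iov(it)
#else
#define	example_iter_iov(it)	((it)->iov)
#endif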
diff --git a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
index 08a8640669b3..c617a6e6b370 100644
--- a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
+++ b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
@@ -1,931 +1,1087 @@
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Copyright (c) 2020, Felix Dörre
* All rights reserved.
*/
#include <sys/dsl_crypt.h>
#include <sys/byteorder.h>
#include <libzfs.h>
#include <syslog.h>
#include <sys/zio_crypt.h>
#include <openssl/evp.h>
#define PAM_SM_AUTH
#define PAM_SM_PASSWORD
#define PAM_SM_SESSION
#include <security/pam_modules.h>
#if defined(__linux__)
#include <security/pam_ext.h>
#define MAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
#elif defined(__FreeBSD__)
#include <security/pam_appl.h>
static void
pam_syslog(pam_handle_t *pamh, int loglevel, const char *fmt, ...)
{
(void) pamh;
va_list args;
va_start(args, fmt);
vsyslog(loglevel, fmt, args);
va_end(args);
}
#define MAP_FLAGS MAP_PRIVATE | MAP_ANON | MAP_NOCORE
#endif
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/wait.h>
#include <pwd.h>
+#include <lib/libzfs/libzfs_impl.h>
#include <sys/mman.h>
static const char PASSWORD_VAR_NAME[] = "pam_zfs_key_authtok";
static const char OLD_PASSWORD_VAR_NAME[] = "pam_zfs_key_oldauthtok";
static libzfs_handle_t *g_zfs;
static void destroy_pw(pam_handle_t *pamh, void *data, int errcode);
typedef int (*mlock_func_t) (const void *, size_t);
typedef struct {
size_t len;
char *value;
} pw_password_t;
/*
* Try to mlock(2) or munlock(2) addr while handling EAGAIN by retrying ten
* times and sleeping 10 milliseconds in between for a total of 0.1
* seconds. lock_func must point to either mlock(2) or munlock(2).
*/
static int
try_lock(mlock_func_t lock_func, const void *addr, size_t len)
{
int err;
int retries = 10;
useconds_t sleep_dur = 10 * 1000;
if ((err = (*lock_func)(addr, len)) != EAGAIN) {
return (err);
}
for (int i = retries; i > 0; --i) {
(void) usleep(sleep_dur);
if ((err = (*lock_func)(addr, len)) != EAGAIN) {
break;
}
}
return (err);
}
static pw_password_t *
alloc_pw_size(size_t len)
{
pw_password_t *pw = malloc(sizeof (pw_password_t));
if (!pw) {
return (NULL);
}
pw->len = len;
/*
* We use mmap(2) rather than malloc(3) since later on we mlock(2) the
* memory region. Since mlock(2) and munlock(2) operate on whole memory
* pages we should allocate a whole page here as mmap(2) does. Further
* this ensures that the addresses passed to mlock(2) and munlock(2) are
* on a page boundary as suggested by FreeBSD and required by some
* other implementations. Finally we avoid inadvertently munlocking
* memory mlocked by a concurrently running instance of us.
*/
pw->value = mmap(NULL, pw->len, PROT_READ | PROT_WRITE, MAP_FLAGS,
-1, 0);
if (pw->value == MAP_FAILED) {
free(pw);
return (NULL);
}
if (try_lock(mlock, pw->value, pw->len) != 0) {
(void) munmap(pw->value, pw->len);
free(pw);
return (NULL);
}
return (pw);
}
static pw_password_t *
alloc_pw_string(const char *source)
{
size_t len = strlen(source) + 1;
pw_password_t *pw = alloc_pw_size(len);
if (!pw) {
return (NULL);
}
memcpy(pw->value, source, pw->len);
return (pw);
}
static void
pw_free(pw_password_t *pw)
{
memset(pw->value, 0, pw->len);
if (try_lock(munlock, pw->value, pw->len) == 0) {
(void) munmap(pw->value, pw->len);
}
free(pw);
}
static pw_password_t *
pw_fetch(pam_handle_t *pamh, int tok)
{
const char *token;
if (pam_get_authtok(pamh, tok, &token, NULL) != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR,
"couldn't get password from PAM stack");
return (NULL);
}
if (!token) {
pam_syslog(pamh, LOG_ERR,
"token from PAM stack is null");
return (NULL);
}
return (alloc_pw_string(token));
}
static const pw_password_t *
pw_fetch_lazy(pam_handle_t *pamh, int tok, const char *var_name)
{
pw_password_t *pw = pw_fetch(pamh, tok);
if (pw == NULL) {
return (NULL);
}
int ret = pam_set_data(pamh, var_name, pw, destroy_pw);
if (ret != PAM_SUCCESS) {
pw_free(pw);
pam_syslog(pamh, LOG_ERR, "pam_set_data failed");
return (NULL);
}
return (pw);
}
static const pw_password_t *
pw_get(pam_handle_t *pamh, int tok, const char *var_name)
{
const pw_password_t *authtok = NULL;
int ret = pam_get_data(pamh, var_name,
(const void**)(&authtok));
if (ret == PAM_SUCCESS)
return (authtok);
if (ret == PAM_NO_MODULE_DATA)
return (pw_fetch_lazy(pamh, tok, var_name));
pam_syslog(pamh, LOG_ERR, "password not available");
return (NULL);
}
static int
pw_clear(pam_handle_t *pamh, const char *var_name)
{
int ret = pam_set_data(pamh, var_name, NULL, NULL);
if (ret != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR, "clearing password failed");
return (-1);
}
return (0);
}
static void
destroy_pw(pam_handle_t *pamh, void *data, int errcode)
{
(void) pamh, (void) errcode;
if (data != NULL) {
pw_free((pw_password_t *)data);
}
}
static int
pam_zfs_init(pam_handle_t *pamh)
{
int error = 0;
if ((g_zfs = libzfs_init()) == NULL) {
error = errno;
pam_syslog(pamh, LOG_ERR, "Zfs initialization error: %s",
libzfs_error_init(error));
}
return (error);
}
static void
pam_zfs_free(void)
{
libzfs_fini(g_zfs);
}
static pw_password_t *
prepare_passphrase(pam_handle_t *pamh, zfs_handle_t *ds,
const char *passphrase, nvlist_t *nvlist)
{
pw_password_t *key = alloc_pw_size(WRAPPING_KEY_LEN);
if (!key) {
return (NULL);
}
uint64_t salt;
uint64_t iters;
if (nvlist != NULL) {
int fd = open("/dev/urandom", O_RDONLY);
if (fd < 0) {
pw_free(key);
return (NULL);
}
int bytes_read = 0;
char *buf = (char *)&salt;
size_t bytes = sizeof (uint64_t);
while (bytes_read < bytes) {
ssize_t len = read(fd, buf + bytes_read, bytes
- bytes_read);
if (len < 0) {
close(fd);
pw_free(key);
return (NULL);
}
bytes_read += len;
}
close(fd);
if (nvlist_add_uint64(nvlist,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt)) {
pam_syslog(pamh, LOG_ERR,
"failed to add salt to nvlist");
pw_free(key);
return (NULL);
}
iters = DEFAULT_PBKDF2_ITERATIONS;
if (nvlist_add_uint64(nvlist, zfs_prop_to_name(
ZFS_PROP_PBKDF2_ITERS), iters)) {
pam_syslog(pamh, LOG_ERR,
"failed to add iters to nvlist");
pw_free(key);
return (NULL);
}
} else {
salt = zfs_prop_get_int(ds, ZFS_PROP_PBKDF2_SALT);
iters = zfs_prop_get_int(ds, ZFS_PROP_PBKDF2_ITERS);
}
salt = LE_64(salt);
if (!PKCS5_PBKDF2_HMAC_SHA1((char *)passphrase,
strlen(passphrase), (uint8_t *)&salt,
sizeof (uint64_t), iters, WRAPPING_KEY_LEN,
(uint8_t *)key->value)) {
pam_syslog(pamh, LOG_ERR, "pbkdf failed");
pw_free(key);
return (NULL);
}
return (key);
}
static int
is_key_loaded(pam_handle_t *pamh, const char *ds_name)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
int keystatus = zfs_prop_get_int(ds, ZFS_PROP_KEYSTATUS);
zfs_close(ds);
return (keystatus != ZFS_KEYSTATUS_UNAVAILABLE);
}
static int
change_key(pam_handle_t *pamh, const char *ds_name,
const char *passphrase)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
nvlist_t *nvlist = fnvlist_alloc();
pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, nvlist);
if (key == NULL) {
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
if (nvlist_add_string(nvlist,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
"prompt")) {
pam_syslog(pamh, LOG_ERR, "nvlist_add failed for keylocation");
pw_free(key);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
if (nvlist_add_uint64(nvlist,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
ZFS_KEYFORMAT_PASSPHRASE)) {
pam_syslog(pamh, LOG_ERR, "nvlist_add failed for keyformat");
pw_free(key);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
int ret = lzc_change_key(ds_name, DCP_CMD_NEW_KEY, nvlist,
(uint8_t *)key->value, WRAPPING_KEY_LEN);
pw_free(key);
if (ret) {
pam_syslog(pamh, LOG_ERR, "change_key failed: %d", ret);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
nvlist_free(nvlist);
zfs_close(ds);
return (0);
}
-static int
-decrypt_mount(pam_handle_t *pamh, const char *ds_name,
- const char *passphrase, boolean_t noop)
-{
- zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
- if (ds == NULL) {
- pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
- return (-1);
- }
- pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, NULL);
- if (key == NULL) {
- zfs_close(ds);
- return (-1);
- }
- int ret = lzc_load_key(ds_name, noop, (uint8_t *)key->value,
- WRAPPING_KEY_LEN);
- pw_free(key);
- if (ret && ret != EEXIST) {
- pam_syslog(pamh, LOG_ERR, "load_key failed: %d", ret);
- zfs_close(ds);
- return (-1);
- }
- if (noop) {
- goto out;
- }
- ret = zfs_mount(ds, NULL, 0);
- if (ret) {
- pam_syslog(pamh, LOG_ERR, "mount failed: %d", ret);
- zfs_close(ds);
- return (-1);
- }
-out:
- zfs_close(ds);
- return (0);
-}
-
-static int
-unmount_unload(pam_handle_t *pamh, const char *ds_name, boolean_t force)
-{
- zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
- if (ds == NULL) {
- pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
- return (-1);
- }
- int ret = zfs_unmount(ds, NULL, force ? MS_FORCE : 0);
- if (ret) {
- pam_syslog(pamh, LOG_ERR, "zfs_unmount failed with: %d", ret);
- zfs_close(ds);
- return (-1);
- }
-
- ret = lzc_unload_key(ds_name);
- if (ret) {
- pam_syslog(pamh, LOG_ERR, "unload_key failed with: %d", ret);
- zfs_close(ds);
- return (-1);
- }
- zfs_close(ds);
- return (0);
-}
-
typedef struct {
char *homes_prefix;
char *runstatedir;
char *homedir;
char *dsname;
uid_t uid_min;
uid_t uid_max;
uid_t uid;
const char *username;
boolean_t unmount_and_unload;
boolean_t force_unmount;
boolean_t recursive_homes;
+ boolean_t mount_recursively;
} zfs_key_config_t;
static int
zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
int argc, const char **argv)
{
config->homes_prefix = strdup("rpool/home");
if (config->homes_prefix == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
return (PAM_SERVICE_ERR);
}
config->runstatedir = strdup(RUNSTATEDIR "/pam_zfs_key");
if (config->runstatedir == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
free(config->homes_prefix);
return (PAM_SERVICE_ERR);
}
const char *name;
if (pam_get_user(pamh, &name, NULL) != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR,
"couldn't get username from PAM stack");
free(config->runstatedir);
free(config->homes_prefix);
return (PAM_SERVICE_ERR);
}
struct passwd *entry = getpwnam(name);
if (!entry) {
free(config->runstatedir);
free(config->homes_prefix);
return (PAM_USER_UNKNOWN);
}
config->uid_min = 1000;
config->uid_max = MAXUID;
config->uid = entry->pw_uid;
config->username = name;
config->unmount_and_unload = B_TRUE;
config->force_unmount = B_FALSE;
config->recursive_homes = B_FALSE;
+ config->mount_recursively = B_FALSE;
config->dsname = NULL;
config->homedir = NULL;
for (int c = 0; c < argc; c++) {
if (strncmp(argv[c], "homes=", 6) == 0) {
free(config->homes_prefix);
config->homes_prefix = strdup(argv[c] + 6);
} else if (strncmp(argv[c], "runstatedir=", 12) == 0) {
free(config->runstatedir);
config->runstatedir = strdup(argv[c] + 12);
} else if (strncmp(argv[c], "uid_min=", 8) == 0) {
sscanf(argv[c] + 8, "%u", &config->uid_min);
} else if (strncmp(argv[c], "uid_max=", 8) == 0) {
sscanf(argv[c] + 8, "%u", &config->uid_max);
} else if (strcmp(argv[c], "nounmount") == 0) {
config->unmount_and_unload = B_FALSE;
} else if (strcmp(argv[c], "forceunmount") == 0) {
config->force_unmount = B_TRUE;
} else if (strcmp(argv[c], "recursive_homes") == 0) {
config->recursive_homes = B_TRUE;
+ } else if (strcmp(argv[c], "mount_recursively") == 0) {
+ config->mount_recursively = B_TRUE;
} else if (strcmp(argv[c], "prop_mountpoint") == 0) {
if (config->homedir == NULL)
config->homedir = strdup(entry->pw_dir);
}
}
return (PAM_SUCCESS);
}
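/*
 * Illustrative sketch, not part of this change: the options parsed above are
 * the module arguments from a pam.d line, e.g. something like
 * "session optional pam_zfs_key.so homes=rpool/home mount_recursively"
 * (dataset name and option choice are examples only).  Feeding such an
 * argument vector through the parser looks roughly like this:
 */
static int __attribute__((unused))
example_parse_args(pam_handle_t *pamh)
{
	const char *argv[] = { "homes=rpool/home", "mount_recursively" };
	zfs_key_config_t cfg;

	int err = zfs_key_config_load(pamh, &cfg, 2, argv);
	if (err != PAM_SUCCESS)
		return (err);
	/* Release the strdup()ed fields, mirroring zfs_key_config_free(). */
	free(cfg.homes_prefix);
	free(cfg.runstatedir);
	free(cfg.homedir);
	free(cfg.dsname);
	return (PAM_SUCCESS);
}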
+typedef struct {
+ pam_handle_t *pamh;
+ zfs_key_config_t *target;
+} mount_umount_dataset_data_t;
+
+static int
+mount_dataset(zfs_handle_t *zhp, void *data)
+{
+ mount_umount_dataset_data_t *mount_umount_dataset_data = data;
+
+ zfs_key_config_t *target = mount_umount_dataset_data->target;
+ pam_handle_t *pamh = mount_umount_dataset_data->pamh;
+
+ /* Refresh properties to get the latest key status */
+ zfs_refresh_properties(zhp);
+
+ int ret = 0;
+
+ /* Check if dataset type is filesystem */
+ if (zhp->zfs_type != ZFS_TYPE_FILESYSTEM) {
+ pam_syslog(pamh, LOG_DEBUG,
+ "dataset is not filesystem: %s, skipping.",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Check if encryption key is available */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
+ ZFS_KEYSTATUS_UNAVAILABLE) {
+ pam_syslog(pamh, LOG_WARNING,
+ "key unavailable for: %s, skipping",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Check if prop canmount is on */
+ if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) != ZFS_CANMOUNT_ON) {
+ pam_syslog(pamh, LOG_INFO,
+ "canmount is not on for: %s, skipping",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Get mountpoint prop for check */
+ char mountpoint[ZFS_MAXPROPLEN];
+ if ((ret = zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
+ sizeof (mountpoint), NULL, NULL, 0, 1)) != 0) {
+ pam_syslog(pamh, LOG_ERR,
+ "failed to get mountpoint prop: %d", ret);
+ return (-1);
+ }
+
+ /* Check if mountpoint isn't none or legacy */
+ if (strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) == 0 ||
+ strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0) {
+ pam_syslog(pamh, LOG_INFO,
+ "mountpoint is none or legacy for: %s, skipping",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Don't mount the dataset if already mounted */
+ if (zfs_is_mounted(zhp, NULL)) {
+ pam_syslog(pamh, LOG_INFO, "already mounted: %s",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Mount the dataset */
+ ret = zfs_mount(zhp, NULL, 0);
+ if (ret) {
+ pam_syslog(pamh, LOG_ERR,
+ "zfs_mount failed for %s with: %d", zfs_get_name(zhp),
+ ret);
+ return (ret);
+ }
+
+ /* Recursively mount children if the recursive flag is set */
+ if (target->mount_recursively) {
+ ret = zfs_iter_filesystems_v2(zhp, 0, mount_dataset, data);
+ if (ret != 0) {
+ pam_syslog(pamh, LOG_ERR,
+ "child iteration failed: %d", ret);
+ return (-1);
+ }
+ }
+
+ return (ret);
+}
+
+static int
+umount_dataset(zfs_handle_t *zhp, void *data)
+{
+ mount_umount_dataset_data_t *mount_umount_dataset_data = data;
+
+ zfs_key_config_t *target = mount_umount_dataset_data->target;
+ pam_handle_t *pamh = mount_umount_dataset_data->pamh;
+
+ int ret = 0;
+ /* Recursively umount children if the recursive flag is set */
+ if (target->mount_recursively) {
+ ret = zfs_iter_filesystems_v2(zhp, 0, umount_dataset, data);
+ if (ret != 0) {
+ pam_syslog(pamh, LOG_ERR,
+ "child iteration failed: %d", ret);
+ return (-1);
+ }
+ }
+
+ /* Check if dataset type is filesystem */
+ if (zhp->zfs_type != ZFS_TYPE_FILESYSTEM) {
+ pam_syslog(pamh, LOG_DEBUG,
+ "dataset is not filesystem: %s, skipping",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Don't umount the dataset if already unmounted */
+ if (zfs_is_mounted(zhp, NULL) == 0) {
+ pam_syslog(pamh, LOG_INFO, "already unmounted: %s",
+ zfs_get_name(zhp));
+ return (0);
+ }
+
+ /* Unmount the dataset */
+ ret = zfs_unmount(zhp, NULL, target->force_unmount ? MS_FORCE : 0);
+ if (ret) {
+ pam_syslog(pamh, LOG_ERR,
+ "zfs_unmount failed for %s with: %d", zfs_get_name(zhp),
+ ret);
+ return (ret);
+ }
+
+ return (ret);
+}
+
+static int
+decrypt_mount(pam_handle_t *pamh, zfs_key_config_t *config, const char *ds_name,
+ const char *passphrase, boolean_t noop)
+{
+ zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
+ if (ds == NULL) {
+ pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
+ return (-1);
+ }
+ pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, NULL);
+ if (key == NULL) {
+ zfs_close(ds);
+ return (-1);
+ }
+ int ret = lzc_load_key(ds_name, noop, (uint8_t *)key->value,
+ WRAPPING_KEY_LEN);
+ pw_free(key);
+ if (ret && ret != EEXIST) {
+ pam_syslog(pamh, LOG_ERR, "load_key failed: %d", ret);
+ zfs_close(ds);
+ return (-1);
+ }
+
+ if (noop) {
+ zfs_close(ds);
+ return (0);
+ }
+
+ mount_umount_dataset_data_t data;
+ data.pamh = pamh;
+ data.target = config;
+
+ ret = mount_dataset(ds, &data);
+ if (ret != 0) {
+ pam_syslog(pamh, LOG_ERR, "mount failed: %d", ret);
+ zfs_close(ds);
+ return (-1);
+ }
+
+ zfs_close(ds);
+ return (0);
+}
+
+static int
+unmount_unload(pam_handle_t *pamh, const char *ds_name,
+ zfs_key_config_t *target)
+{
+ zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
+ if (ds == NULL) {
+ pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
+ return (-1);
+ }
+
+ mount_umount_dataset_data_t data;
+ data.pamh = pamh;
+ data.target = target;
+
+ int ret = umount_dataset(ds, &data);
+ if (ret) {
+ pam_syslog(pamh, LOG_ERR,
+ "unmount_dataset failed with: %d", ret);
+ zfs_close(ds);
+ return (-1);
+ }
+
+ ret = lzc_unload_key(ds_name);
+ if (ret) {
+ pam_syslog(pamh, LOG_ERR, "unload_key failed with: %d", ret);
+ zfs_close(ds);
+ return (-1);
+ }
+ zfs_close(ds);
+ return (0);
+}
+
static void
zfs_key_config_free(zfs_key_config_t *config)
{
free(config->homes_prefix);
free(config->runstatedir);
free(config->homedir);
free(config->dsname);
}
static int
find_dsname_by_prop_value(zfs_handle_t *zhp, void *data)
{
zfs_type_t type = zfs_get_type(zhp);
zfs_key_config_t *target = data;
char mountpoint[ZFS_MAXPROPLEN];
/* Skip any datasets whose type does not match */
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
/* Skip any datasets whose mountpoint does not match */
(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE);
if (strcmp(target->homedir, mountpoint) != 0) {
if (target->recursive_homes) {
(void) zfs_iter_filesystems_v2(zhp, 0,
find_dsname_by_prop_value, target);
}
zfs_close(zhp);
return (target->dsname != NULL);
}
target->dsname = strdup(zfs_get_name(zhp));
zfs_close(zhp);
return (1);
}
static char *
-zfs_key_config_get_dataset(zfs_key_config_t *config)
+zfs_key_config_get_dataset(pam_handle_t *pamh, zfs_key_config_t *config)
{
if (config->homedir != NULL &&
config->homes_prefix != NULL) {
if (strcmp(config->homes_prefix, "*") == 0) {
(void) zfs_iter_root(g_zfs,
find_dsname_by_prop_value, config);
} else {
zfs_handle_t *zhp = zfs_open(g_zfs,
config->homes_prefix, ZFS_TYPE_FILESYSTEM);
if (zhp == NULL) {
- pam_syslog(NULL, LOG_ERR,
+ pam_syslog(pamh, LOG_ERR,
"dataset %s not found",
config->homes_prefix);
return (NULL);
}
(void) zfs_iter_filesystems_v2(zhp, 0,
find_dsname_by_prop_value, config);
zfs_close(zhp);
}
char *dsname = config->dsname;
config->dsname = NULL;
return (dsname);
}
if (config->homes_prefix == NULL) {
return (NULL);
}
size_t len = ZFS_MAX_DATASET_NAME_LEN;
size_t total_len = strlen(config->homes_prefix) + 1
+ strlen(config->username);
if (total_len > len) {
return (NULL);
}
char *ret = malloc(len + 1);
if (!ret) {
return (NULL);
}
ret[0] = 0;
(void) snprintf(ret, len + 1, "%s/%s", config->homes_prefix,
config->username);
return (ret);
}
static int
zfs_key_config_modify_session_counter(pam_handle_t *pamh,
zfs_key_config_t *config, int delta)
{
const char *runtime_path = config->runstatedir;
if (mkdir(runtime_path, S_IRWXU) != 0 && errno != EEXIST) {
pam_syslog(pamh, LOG_ERR, "Can't create runtime path: %d",
errno);
return (-1);
}
if (chown(runtime_path, 0, 0) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't chown runtime path: %d",
errno);
return (-1);
}
if (chmod(runtime_path, S_IRWXU) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't chmod runtime path: %d",
errno);
return (-1);
}
char *counter_path;
if (asprintf(&counter_path, "%s/%u", runtime_path, config->uid) == -1)
return (-1);
const int fd = open(counter_path,
O_RDWR | O_CLOEXEC | O_CREAT | O_NOFOLLOW,
S_IRUSR | S_IWUSR);
free(counter_path);
if (fd < 0) {
pam_syslog(pamh, LOG_ERR, "Can't open counter file: %d", errno);
return (-1);
}
if (flock(fd, LOCK_EX) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't lock counter file: %d", errno);
close(fd);
return (-1);
}
char counter[20];
char *pos = counter;
int remaining = sizeof (counter) - 1;
int ret;
counter[sizeof (counter) - 1] = 0;
while (remaining > 0 && (ret = read(fd, pos, remaining)) > 0) {
remaining -= ret;
pos += ret;
}
*pos = 0;
long int counter_value = strtol(counter, NULL, 10);
counter_value += delta;
if (counter_value < 0) {
counter_value = 0;
}
lseek(fd, 0, SEEK_SET);
if (ftruncate(fd, 0) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't truncate counter file: %d",
errno);
close(fd);
return (-1);
}
snprintf(counter, sizeof (counter), "%ld", counter_value);
remaining = strlen(counter);
pos = counter;
while (remaining > 0 && (ret = write(fd, pos, remaining)) > 0) {
remaining -= ret;
pos += ret;
}
close(fd);
return (counter_value);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_authenticate(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SERVICE_ERR);
}
zfs_key_config_t config;
int config_err = zfs_key_config_load(pamh, &config, argc, argv);
if (config_err != PAM_SUCCESS) {
return (config_err);
}
if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
const pw_password_t *token = pw_fetch_lazy(pamh,
PAM_AUTHTOK, PASSWORD_VAR_NAME);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_AUTH_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- char *dataset = zfs_key_config_get_dataset(&config);
+ char *dataset = zfs_key_config_get_dataset(pamh, &config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- if (decrypt_mount(pamh, dataset, token->value, B_TRUE) == -1) {
+ if (decrypt_mount(pamh, &config, dataset, token->value, B_TRUE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_AUTH_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_setcred(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) pamh, (void) flags, (void) argc, (void) argv;
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_chauthtok(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_PERM_DENIED);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SERVICE_ERR);
}
if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
const pw_password_t *old_token = pw_get(pamh,
PAM_OLDAUTHTOK, OLD_PASSWORD_VAR_NAME);
{
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- char *dataset = zfs_key_config_get_dataset(&config);
+ char *dataset = zfs_key_config_get_dataset(pamh, &config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (!old_token) {
pam_syslog(pamh, LOG_ERR,
"old password from PAM stack is null");
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- if (decrypt_mount(pamh, dataset,
+ if (decrypt_mount(pamh, &config, dataset,
old_token->value, B_TRUE) == -1) {
pam_syslog(pamh, LOG_ERR,
"old token mismatch");
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_PERM_DENIED);
}
}
if ((flags & PAM_UPDATE_AUTHTOK) != 0) {
const pw_password_t *token = pw_get(pamh, PAM_AUTHTOK,
PASSWORD_VAR_NAME);
if (token == NULL) {
pam_syslog(pamh, LOG_ERR, "new password unavailable");
pam_zfs_free();
zfs_key_config_free(&config);
pw_clear(pamh, OLD_PASSWORD_VAR_NAME);
return (PAM_SERVICE_ERR);
}
- char *dataset = zfs_key_config_get_dataset(&config);
+ char *dataset = zfs_key_config_get_dataset(pamh, &config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
pw_clear(pamh, OLD_PASSWORD_VAR_NAME);
pw_clear(pamh, PASSWORD_VAR_NAME);
return (PAM_SERVICE_ERR);
}
int was_loaded = is_key_loaded(pamh, dataset);
- if (!was_loaded && decrypt_mount(pamh, dataset,
+ if (!was_loaded && decrypt_mount(pamh, &config, dataset,
old_token->value, B_FALSE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
pw_clear(pamh, OLD_PASSWORD_VAR_NAME);
pw_clear(pamh, PASSWORD_VAR_NAME);
return (PAM_SERVICE_ERR);
}
int changed = change_key(pamh, dataset, token->value);
if (!was_loaded) {
- unmount_unload(pamh, dataset, config.force_unmount);
+ unmount_unload(pamh, dataset, &config);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
if (pw_clear(pamh, OLD_PASSWORD_VAR_NAME) == -1 ||
pw_clear(pamh, PASSWORD_VAR_NAME) == -1 || changed == -1) {
return (PAM_SERVICE_ERR);
}
} else {
zfs_key_config_free(&config);
}
return (PAM_SUCCESS);
}
PAM_EXTERN int
pam_sm_open_session(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SUCCESS);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SESSION_ERR);
}
if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
int counter = zfs_key_config_modify_session_counter(pamh, &config, 1);
if (counter != 1) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
const pw_password_t *token = pw_get(pamh,
PAM_AUTHTOK, PASSWORD_VAR_NAME);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- char *dataset = zfs_key_config_get_dataset(&config);
+ char *dataset = zfs_key_config_get_dataset(pamh, &config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- if (decrypt_mount(pamh, dataset, token->value, B_FALSE) == -1) {
+ if (decrypt_mount(pamh, &config, dataset,
+ token->value, B_FALSE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
if (pw_clear(pamh, PASSWORD_VAR_NAME) == -1) {
return (PAM_SERVICE_ERR);
}
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_close_session(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SUCCESS);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SESSION_ERR);
}
if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
int counter = zfs_key_config_modify_session_counter(pamh, &config, -1);
if (counter != 0) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
if (config.unmount_and_unload) {
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- char *dataset = zfs_key_config_get_dataset(&config);
+ char *dataset = zfs_key_config_get_dataset(pamh, &config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
- if (unmount_unload(pamh, dataset, config.force_unmount) == -1) {
+ if (unmount_unload(pamh, dataset, &config) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
free(dataset);
pam_zfs_free();
}
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
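The hunks above thread the PAM handle into zfs_key_config_get_dataset() and pass the full config to decrypt_mount() and unmount_unload(). A minimal sketch of what the new helper signature enables follows; the body and names below are hypothetical and are not taken from this diff, whose actual implementation lives elsewhere in pam_zfs_key.c:

/*
 * Illustrative sketch only -- not the real implementation.  The added
 * pam_handle_t argument lets the helper report failures via pam_syslog()
 * instead of returning NULL silently.
 */
static char *
zfs_key_config_get_dataset_sketch(pam_handle_t *pamh, zfs_key_config_t *config)
{
	char *dataset = NULL;

	/* ... derive the user's home dataset name from *config ... */
	if (dataset == NULL) {
		pam_syslog(pamh, LOG_ERR,
		    "cannot determine dataset for user home");
	}
	return (dataset);
}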
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
index df7be6fc13f6..1479242de53b 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
@@ -1,140 +1,143 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SPL_MOD_H
#define _SPL_MOD_H
#include <sys/sysctl.h>
#define ZMOD_RW CTLFLAG_RWTUN
#define ZMOD_RD CTLFLAG_RDTUN
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc) \
SYSCTL_DECL(_vfs_ ## scope_prefix); \
SYSCTL_##type(_vfs_ ## scope_prefix, OID_AUTO, name, perm, \
&name_prefix ## name, 0, desc)
#define ZFS_MODULE_PARAM_ARGS SYSCTL_HANDLER_ARGS
#define ZFS_MODULE_PARAM_CALL_IMPL(parent, name, perm, args, desc) \
SYSCTL_DECL(parent); \
SYSCTL_PROC(parent, OID_AUTO, name, CTLFLAG_MPSAFE | perm | args, desc)
#define ZFS_MODULE_PARAM_CALL( \
scope_prefix, name_prefix, name, func, _, perm, desc) \
ZFS_MODULE_PARAM_CALL_IMPL(_vfs_ ## scope_prefix, name, perm, \
func ## _args(name_prefix ## name), desc)
#define ZFS_MODULE_VIRTUAL_PARAM_CALL ZFS_MODULE_PARAM_CALL
#define param_set_arc_u64_args(var) \
CTLTYPE_U64, &var, 0, param_set_arc_u64, "QU"
#define param_set_arc_int_args(var) \
CTLTYPE_INT, &var, 0, param_set_arc_int, "I"
#define param_set_arc_min_args(var) \
CTLTYPE_U64, NULL, 0, param_set_arc_min, "QU"
#define param_set_arc_max_args(var) \
CTLTYPE_U64, NULL, 0, param_set_arc_max, "QU"
#define param_set_arc_free_target_args(var) \
CTLTYPE_UINT, NULL, 0, param_set_arc_free_target, "IU"
#define param_set_arc_no_grow_shift_args(var) \
CTLTYPE_INT, NULL, 0, param_set_arc_no_grow_shift, "I"
#define param_set_deadman_failmode_args(var) \
CTLTYPE_STRING, NULL, 0, param_set_deadman_failmode, "A"
#define param_set_active_allocator_args(var) \
CTLTYPE_STRING, NULL, 0, param_set_active_allocator, "A"
#define param_set_deadman_synctime_args(var) \
CTLTYPE_U64, NULL, 0, param_set_deadman_synctime, "QU"
#define param_set_deadman_ziotime_args(var) \
CTLTYPE_U64, NULL, 0, param_set_deadman_ziotime, "QU"
#define param_set_multihost_interval_args(var) \
CTLTYPE_U64, NULL, 0, param_set_multihost_interval, "QU"
#define param_set_slop_shift_args(var) \
CTLTYPE_INT, NULL, 0, param_set_slop_shift, "I"
#define param_set_min_auto_ashift_args(var) \
CTLTYPE_UINT, NULL, 0, param_set_min_auto_ashift, "IU"
#define param_set_max_auto_ashift_args(var) \
CTLTYPE_UINT, NULL, 0, param_set_max_auto_ashift, "IU"
+#define param_set_raidz_impl_args(var) \
+ CTLTYPE_STRING, NULL, 0, param_set_raidz_impl, "A"
+
#define spa_taskq_read_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, spa_taskq_read_param, "A"
#define spa_taskq_write_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, spa_taskq_write_param, "A"
#define fletcher_4_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, fletcher_4_param, "A"
#define blake3_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, blake3_param, "A"
#define sha256_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, sha256_param, "A"
#define sha512_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, sha512_param, "A"
#include <sys/kernel.h>
#define module_init(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_init_early(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSINIT(zfs_ ## fn, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_exit(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSUNINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#endif /* SPL_MOD_H */
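The new param_set_raidz_impl_args() glue follows the existing pattern for string-typed tunables. As a hedged illustration (the scope and prefix names below are examples, not taken from this diff; the fifth argument is the Linux-side get function and is ignored on FreeBSD), a declaration such as

ZFS_MODULE_PARAM_CALL(zfs, zfs_, raidz_impl, param_set_raidz_impl,
    param_get_charp, ZMOD_RW, "Select raidz implementation");

would expand through ZFS_MODULE_PARAM_CALL_IMPL() to roughly

SYSCTL_DECL(_vfs_zfs);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, raidz_impl,
    CTLFLAG_MPSAFE | CTLFLAG_RWTUN | CTLTYPE_STRING, NULL, 0,
    param_set_raidz_impl, "A", "Select raidz implementation");

exposing the tunable as a vfs.zfs.raidz_impl string sysctl whose writes are routed to param_set_raidz_impl().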
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
index 9e7afea2ab34..fcb4a464c9e4 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
@@ -1,202 +1,212 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_UIO_H
#define _SPL_UIO_H
#include <sys/debug.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/blkdev_compat.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <asm/uaccess.h>
#include <sys/types.h>
#include <sys/string.h>
/*
* uio_extflg: extended flags
*/
#define UIO_DIRECT 0x0001 /* Direct I/O request */
#if defined(HAVE_FAULT_IN_IOV_ITER_READABLE)
#define iov_iter_fault_in_readable(a, b) fault_in_iov_iter_readable(a, b)
#endif
typedef struct iovec iovec_t;
typedef enum zfs_uio_rw {
UIO_READ = 0,
UIO_WRITE = 1,
} zfs_uio_rw_t;
typedef enum zfs_uio_seg {
UIO_SYSSPACE = 0,
UIO_BVEC = 1,
UIO_ITER = 2,
} zfs_uio_seg_t;
/*
 * This structure is used when doing Direct I/O.
*/
typedef struct {
struct page **pages; /* Mapped pages */
long npages; /* Number of mapped pages */
+ boolean_t pinned; /* Whether FOLL_PIN was used */
} zfs_uio_dio_t;
typedef struct zfs_uio {
union {
const struct iovec *uio_iov;
const struct bio_vec *uio_bvec;
struct iov_iter *uio_iter;
};
int uio_iovcnt; /* Number of iovecs */
offset_t uio_soffset; /* Starting logical offset */
offset_t uio_loffset; /* Current logical offset */
zfs_uio_seg_t uio_segflg; /* Segment type */
boolean_t uio_fault_disable;
uint16_t uio_fmode; /* Access mode (unused) */
uint16_t uio_extflg; /* Extra flags (UIO_DIRECT) */
ssize_t uio_resid; /* Residual unprocessed bytes */
size_t uio_skip; /* Skipped bytes in current iovec */
zfs_uio_dio_t uio_dio; /* Direct I/O user pages */
struct request *rq;
} zfs_uio_t;
#define zfs_uio_segflg(u) (u)->uio_segflg
#define zfs_uio_offset(u) (u)->uio_loffset
#define zfs_uio_resid(u) (u)->uio_resid
#define zfs_uio_iovcnt(u) (u)->uio_iovcnt
#define zfs_uio_iovlen(u, idx) (u)->uio_iov[(idx)].iov_len
#define zfs_uio_iovbase(u, idx) (u)->uio_iov[(idx)].iov_base
#define zfs_uio_fault_disable(u, set) (u)->uio_fault_disable = set
#define zfs_uio_soffset(u) (u)->uio_soffset
#define zfs_uio_rlimit_fsize(z, u) (0)
#define zfs_uio_fault_move(p, n, rw, u) zfs_uiomove((p), (n), (rw), (u))
extern int zfs_uio_prefaultpages(ssize_t, zfs_uio_t *);
static inline void
zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
{
uio->uio_loffset = off;
}
static inline void
zfs_uio_setsoffset(zfs_uio_t *uio, offset_t off)
{
ASSERT3U(zfs_uio_offset(uio), ==, off);
zfs_uio_soffset(uio) = off;
}
static inline void
zfs_uio_advance(zfs_uio_t *uio, ssize_t size)
{
uio->uio_resid -= size;
uio->uio_loffset += size;
}
static inline void
zfs_uio_iovec_init(zfs_uio_t *uio, const struct iovec *iov,
unsigned long nr_segs, offset_t offset, zfs_uio_seg_t seg, ssize_t resid,
size_t skip)
{
ASSERT(seg == UIO_SYSSPACE);
uio->uio_iov = iov;
uio->uio_iovcnt = nr_segs;
uio->uio_loffset = offset;
uio->uio_segflg = seg;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = resid;
uio->uio_skip = skip;
uio->uio_soffset = uio->uio_loffset;
memset(&uio->uio_dio, 0, sizeof (zfs_uio_dio_t));
}
static inline void
zfs_uio_bvec_init(zfs_uio_t *uio, struct bio *bio, struct request *rq)
{
/* Either bio or rq will be set, but not both */
ASSERT3P(uio, !=, bio);
if (bio) {
uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
} else {
uio->uio_bvec = NULL;
uio->uio_iovcnt = 0;
}
uio->uio_loffset = io_offset(bio, rq);
uio->uio_segflg = UIO_BVEC;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = io_size(bio, rq);
if (bio) {
uio->uio_skip = BIO_BI_SKIP(bio);
} else {
uio->uio_skip = 0;
}
uio->rq = rq;
uio->uio_soffset = uio->uio_loffset;
memset(&uio->uio_dio, 0, sizeof (zfs_uio_dio_t));
}
static inline void
zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset,
ssize_t resid, size_t skip)
{
uio->uio_iter = iter;
uio->uio_iovcnt = iter->nr_segs;
uio->uio_loffset = offset;
uio->uio_segflg = UIO_ITER;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = resid;
uio->uio_skip = skip;
uio->uio_soffset = uio->uio_loffset;
memset(&uio->uio_dio, 0, sizeof (zfs_uio_dio_t));
}
#if defined(HAVE_ITER_IOV)
#define zfs_uio_iter_iov(iter) iter_iov((iter))
#else
#define zfs_uio_iter_iov(iter) (iter)->iov
#endif
#if defined(HAVE_IOV_ITER_TYPE)
#define zfs_uio_iov_iter_type(iter) iov_iter_type((iter))
#else
#define zfs_uio_iov_iter_type(iter) (iter)->type
#endif
+#if defined(HAVE_ITER_IS_UBUF)
+#define zfs_user_backed_iov_iter(iter) \
+ (iter_is_ubuf((iter)) || \
+ (zfs_uio_iov_iter_type((iter)) == ITER_IOVEC))
+#else
+#define zfs_user_backed_iov_iter(iter) \
+ (zfs_uio_iov_iter_type((iter)) == ITER_IOVEC)
+#endif
+
#endif /* SPL_UIO_H */
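Two additions work together here: uio_dio.pinned records whether the Direct I/O pages were acquired with FOLL_PIN, and zfs_user_backed_iov_iter() lets callers detect user-backed iov_iters (including ITER_UBUF on newer kernels). A hedged sketch of a consumer is shown below; the function name is hypothetical and the real logic lives in the Linux zfs_uio.c, which is not part of the hunks shown:

/*
 * Illustrative only: release Direct I/O pages with the primitive that
 * matches how they were acquired.
 */
static void
zfs_uio_release_dio_pages_sketch(zfs_uio_t *uio)
{
	if (uio->uio_dio.pinned) {
		/* Pages acquired with FOLL_PIN must be unpinned. */
		unpin_user_pages(uio->uio_dio.pages, uio->uio_dio.npages);
	} else {
		for (long i = 0; i < uio->uio_dio.npages; i++)
			put_page(uio->uio_dio.pages[i]);
	}
}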
diff --git a/sys/contrib/openzfs/include/sys/dnode.h b/sys/contrib/openzfs/include/sys/dnode.h
index 5d0f0fb26d02..b6d3e2c918c5 100644
--- a/sys/contrib/openzfs/include/sys/dnode.h
+++ b/sys/contrib/openzfs/include/sys/dnode.h
@@ -1,670 +1,670 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DNODE_H
#define _SYS_DNODE_H
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/zfs_refcount.h>
#include <sys/dmu_zfetch.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#include <sys/wmsum.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* dnode_hold() flags.
*/
#define DNODE_MUST_BE_ALLOCATED 1
#define DNODE_MUST_BE_FREE 2
#define DNODE_DRY_RUN 4
/*
* dnode_next_offset() flags.
*/
#define DNODE_FIND_HOLE 1
#define DNODE_FIND_BACKWARDS 2
#define DNODE_FIND_HAVELOCK 4
/*
* Fixed constants.
*/
#define DNODE_SHIFT 9 /* 512 bytes */
#define DN_MIN_INDBLKSHIFT 12 /* 4k */
/*
* If we ever increase this value beyond 20, we need to revisit all logic that
* does x << level * ebps to handle overflow. With a 1M indirect block size,
* 4 levels of indirect blocks would not be able to guarantee addressing an
* entire object, so 5 levels will be used, but 5 * (20 - 7) = 65.
*/
#define DN_MAX_INDBLKSHIFT 17 /* 128k */
#define DNODE_BLOCK_SHIFT 14 /* 16k */
#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */
#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */
#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */
/*
* dnode id flags
*
* Note: a file will never ever have its ids moved from bonus->spill
*/
#define DN_ID_CHKED_BONUS 0x1
#define DN_ID_CHKED_SPILL 0x2
#define DN_ID_OLD_EXIST 0x4
#define DN_ID_NEW_EXIST 0x8
/*
* Derived constants.
*/
#define DNODE_MIN_SIZE (1 << DNODE_SHIFT)
#define DNODE_MAX_SIZE (1 << DNODE_BLOCK_SHIFT)
#define DNODE_BLOCK_SIZE (1 << DNODE_BLOCK_SHIFT)
#define DNODE_MIN_SLOTS (DNODE_MIN_SIZE >> DNODE_SHIFT)
#define DNODE_MAX_SLOTS (DNODE_MAX_SIZE >> DNODE_SHIFT)
#define DN_BONUS_SIZE(dnsize) ((dnsize) - DNODE_CORE_SIZE - \
(1 << SPA_BLKPTRSHIFT))
#define DN_SLOTS_TO_BONUSLEN(slots) DN_BONUS_SIZE((slots) << DNODE_SHIFT)
#define DN_OLD_MAX_BONUSLEN (DN_BONUS_SIZE(DNODE_MIN_SIZE))
#define DN_MAX_NBLKPTR ((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT)
#define DN_ZERO_BONUSLEN (DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)
#define DN_KILL_SPILLBLK (1)
#define DN_SLOT_UNINIT ((void *)NULL) /* Uninitialized */
#define DN_SLOT_FREE ((void *)1UL) /* Free slot */
#define DN_SLOT_ALLOCATED ((void *)2UL) /* Allocated slot */
#define DN_SLOT_INTERIOR ((void *)3UL) /* Interior allocated slot */
#define DN_SLOT_IS_PTR(dn) ((void *)dn > DN_SLOT_INTERIOR)
#define DN_SLOT_IS_VALID(dn) ((void *)dn != NULL)
#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT)
/*
* This is inaccurate if the indblkshift of the particular object is not the
* max. But it's only used by userland to calculate the zvol reservation.
*/
#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
#define DNODES_PER_LEVEL (1ULL << DNODES_PER_LEVEL_SHIFT)
#define DN_MAX_LEVELS (DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \
DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1)
/*
* Use the flexible array instead of the fixed length one dn_bonus
* to address memcpy/memmove fortify error
*/
#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus_flexible + \
(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
#define DN_MAX_BONUS_LEN(dnp) \
((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? \
(uint8_t *)DN_SPILL_BLKPTR(dnp) - (uint8_t *)DN_BONUS(dnp) : \
(uint8_t *)(dnp + (dnp->dn_extra_slots + 1)) - (uint8_t *)DN_BONUS(dnp))
#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift))
struct dmu_buf_impl;
struct objset;
struct zio;
enum dnode_dirtycontext {
DN_UNDIRTIED,
DN_DIRTY_OPEN,
DN_DIRTY_SYNC
};
/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
#define DNODE_FLAG_USED_BYTES (1 << 0)
#define DNODE_FLAG_USERUSED_ACCOUNTED (1 << 1)
/* Does dnode have a SA spill blkptr in bonus? */
#define DNODE_FLAG_SPILL_BLKPTR (1 << 2)
/* User/Group/Project dnode accounting */
#define DNODE_FLAG_USEROBJUSED_ACCOUNTED (1 << 3)
/*
* This mask defines the set of flags which are "portable", meaning
* that they can be preserved when doing a raw encrypted zfs send.
* Flags included in this mask will be protected by AAD when the block
* of dnodes is encrypted.
*/
#define DNODE_CRYPT_PORTABLE_FLAGS_MASK (DNODE_FLAG_SPILL_BLKPTR)
/*
* VARIABLE-LENGTH (LARGE) DNODES
*
* The motivation for variable-length dnodes is to eliminate the overhead
* associated with using spill blocks. Spill blocks are used to store
* system attribute data (i.e. file metadata) that does not fit in the
* dnode's bonus buffer. By allowing a larger bonus buffer area the use of
* a spill block can be avoided. Spill blocks potentially incur an
* additional read I/O for every dnode in a dnode block. As a worst case
* example, reading 32 dnodes from a 16k dnode block and all of the spill
* blocks could issue 33 separate reads. Now suppose those dnodes have size
* 1024 and therefore don't need spill blocks. Then the worst case number
* of blocks read is reduced from 33 to two--one per dnode block.
*
* ZFS-on-Linux systems that make heavy use of extended attributes benefit
* from this feature. In particular, ZFS-on-Linux supports the xattr=sa
* dataset property which allows file extended attribute data to be stored
* in the dnode bonus buffer as an alternative to the traditional
* directory-based format. Workloads such as SELinux and the Lustre
* distributed filesystem often store enough xattr data to force spill
* blocks when xattr=sa is in effect. Large dnodes may therefore provide a
* performance benefit to such systems. Other use cases that benefit from
* this feature include files with large ACLs and symbolic links with long
* target names.
*
* The size of a dnode may be a multiple of 512 bytes up to the size of a
* dnode block (currently 16384 bytes). The dn_extra_slots field of the
* on-disk dnode_phys_t structure describes the size of the physical dnode
* on disk. The field represents how many "extra" dnode_phys_t slots a
* dnode consumes in its dnode block. This convention results in a value of
* 0 for 512 byte dnodes which preserves on-disk format compatibility with
* older software which doesn't support large dnodes.
*
* Similarly, the in-memory dnode_t structure has a dn_num_slots field
* to represent the total number of dnode_phys_t slots consumed on disk.
* Thus dn->dn_num_slots is 1 greater than the corresponding
* dnp->dn_extra_slots. This difference in convention was adopted
* because, unlike on-disk structures, backward compatibility is not a
* concern for in-memory objects, so we used a more natural way to
* represent size for a dnode_t.
*
* The default size for newly created dnodes is determined by the value of
* the "dnodesize" dataset property. By default the property is set to
* "legacy" which is compatible with older software. Setting the property
* to "auto" will allow the filesystem to choose the most suitable dnode
* size. Currently this just sets the default dnode size to 1k, but future
* code improvements could dynamically choose a size based on observed
* workload patterns. Dnodes of varying sizes can coexist within the same
* dataset and even within the same dnode block.
*/
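/*
 * Worked example (illustrative, using the constants defined above): a 1k
 * dnode occupies two 512-byte slots, so dn_extra_slots is 1 on disk while
 * the in-memory dn_num_slots is 2.  Its maximum bonus length is
 * DN_SLOTS_TO_BONUSLEN(2) = DN_BONUS_SIZE(1024) = 1024 - 64 - 128 = 832
 * bytes, versus DN_OLD_MAX_BONUSLEN = DN_BONUS_SIZE(512) = 320 bytes for
 * a legacy 512-byte dnode (the dn_bonus[0..319] region in the diagram
 * below).
 */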
typedef struct dnode_phys {
uint8_t dn_type; /* dmu_object_type_t */
uint8_t dn_indblkshift; /* ln2(indirect block size) */
uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */
uint8_t dn_nblkptr; /* length of dn_blkptr */
uint8_t dn_bonustype; /* type of data in bonus buffer */
uint8_t dn_checksum; /* ZIO_CHECKSUM type */
uint8_t dn_compress; /* ZIO_COMPRESS type */
uint8_t dn_flags; /* DNODE_FLAG_* */
uint16_t dn_datablkszsec; /* data block size in 512b sectors */
uint16_t dn_bonuslen; /* length of dn_bonus */
uint8_t dn_extra_slots; /* # of subsequent slots consumed */
uint8_t dn_pad2[3];
/* accounting is protected by dn_dirty_mtx */
uint64_t dn_maxblkid; /* largest allocated block ID */
uint64_t dn_used; /* bytes (or sectors) of disk space */
/*
* Both dn_pad2 and dn_pad3 are protected by the block's MAC. This
* allows us to protect any fields that might be added here in the
* future. In either case, developers will want to check
* zio_crypt_init_uios_dnode() and zio_crypt_do_dnode_hmac_updates()
* to ensure the new field is being protected and updated properly.
*/
uint64_t dn_pad3[4];
/*
* The tail region is 448 bytes for a 512 byte dnode, and
* correspondingly larger for larger dnode sizes. The spill
* block pointer, when present, is always at the end of the tail
* region. There are three ways this space may be used, using
* a 512 byte dnode for this diagram:
*
* 0 64 128 192 256 320 384 448 (offset)
* +---------------+---------------+---------------+-------+
* | dn_blkptr[0] | dn_blkptr[1] | dn_blkptr[2] | / |
* +---------------+---------------+---------------+-------+
* | dn_blkptr[0] | dn_bonus[0..319] |
* +---------------+-----------------------+---------------+
* | dn_blkptr[0] | dn_bonus[0..191] | dn_spill |
* +---------------+-----------------------+---------------+
*/
union {
blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
struct {
blkptr_t __dn_ignore1;
uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
};
struct {
blkptr_t __dn_ignore2;
uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
sizeof (blkptr_t)];
blkptr_t dn_spill;
};
struct {
blkptr_t __dn_ignore4;
uint8_t dn_bonus_flexible[];
};
};
} dnode_phys_t;
#define DN_SPILL_BLKPTR(dnp) ((blkptr_t *)((char *)(dnp) + \
(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT)))
struct dnode {
/*
* Protects the structure of the dnode, including the number of levels
* of indirection (dn_nlevels), dn_maxblkid, and dn_next_*
*/
krwlock_t dn_struct_rwlock;
/* Our link on dn_objset->os_dnodes list; protected by os_lock. */
list_node_t dn_link;
/* immutable: */
struct objset *dn_objset;
uint64_t dn_object;
struct dmu_buf_impl *dn_dbuf;
struct dnode_handle *dn_handle;
dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */
/*
* Copies of stuff in dn_phys. They're valid in the open
* context (eg. even before the dnode is first synced).
* Where necessary, these are protected by dn_struct_rwlock.
*/
dmu_object_type_t dn_type; /* object type */
uint16_t dn_bonuslen; /* bonus length */
uint8_t dn_bonustype; /* bonus type */
uint8_t dn_nblkptr; /* number of blkptrs (immutable) */
uint8_t dn_checksum; /* ZIO_CHECKSUM type */
uint8_t dn_compress; /* ZIO_COMPRESS type */
uint8_t dn_nlevels;
uint8_t dn_indblkshift;
uint8_t dn_datablkshift; /* zero if blksz not power of 2! */
uint8_t dn_moved; /* Has this dnode been moved? */
uint16_t dn_datablkszsec; /* in 512b sectors */
uint32_t dn_datablksz; /* in bytes */
uint64_t dn_maxblkid;
uint8_t dn_next_type[TXG_SIZE];
uint8_t dn_num_slots; /* metadnode slots consumed on disk */
uint8_t dn_next_nblkptr[TXG_SIZE];
uint8_t dn_next_nlevels[TXG_SIZE];
uint8_t dn_next_indblkshift[TXG_SIZE];
uint8_t dn_next_bonustype[TXG_SIZE];
uint8_t dn_rm_spillblk[TXG_SIZE]; /* for removing spill blk */
uint16_t dn_next_bonuslen[TXG_SIZE];
uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */
uint64_t dn_next_maxblkid[TXG_SIZE]; /* next maxblkid in bytes */
/* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */
uint32_t dn_dbufs_count; /* count of dn_dbufs */
/* protected by os_lock: */
multilist_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */
/* protected by dn_mtx: */
kmutex_t dn_mtx;
list_t dn_dirty_records[TXG_SIZE];
- struct range_tree *dn_free_ranges[TXG_SIZE];
+ struct zfs_range_tree *dn_free_ranges[TXG_SIZE];
uint64_t dn_allocated_txg;
uint64_t dn_free_txg;
uint64_t dn_assigned_txg;
uint64_t dn_dirty_txg; /* txg dnode was last dirtied */
kcondvar_t dn_notxholds;
kcondvar_t dn_nodnholds;
enum dnode_dirtycontext dn_dirtyctx;
const void *dn_dirtyctx_firstset; /* dbg: contents meaningless */
/* protected by own devices */
zfs_refcount_t dn_tx_holds;
zfs_refcount_t dn_holds;
kmutex_t dn_dbufs_mtx;
/*
* Descendent dbufs, ordered by dbuf_compare. Note that dn_dbufs
* can contain multiple dbufs of the same (level, blkid) when a
* dbuf is marked DB_EVICTING without being removed from
* dn_dbufs. To maintain the avl invariant that there cannot be
* duplicate entries, we order the dbufs by an arbitrary value -
* their address in memory. This means that dn_dbufs cannot be used to
* directly look up a dbuf. Instead, callers must use avl_walk, have
* a reference to the dbuf, or look up a non-existent node with
* db_state = DB_SEARCH (see dbuf_free_range for an example).
*/
avl_tree_t dn_dbufs;
/* protected by dn_struct_rwlock */
struct dmu_buf_impl *dn_bonus; /* bonus buffer dbuf */
boolean_t dn_have_spill; /* have spill or are spilling */
/* parent IO for current sync write */
zio_t *dn_zio;
/* used in syncing context */
uint64_t dn_oldused; /* old phys used bytes */
uint64_t dn_oldflags; /* old phys dn_flags */
uint64_t dn_olduid, dn_oldgid, dn_oldprojid;
uint64_t dn_newuid, dn_newgid, dn_newprojid;
int dn_id_flags;
/* holds prefetch structure */
struct zfetch dn_zfetch;
/* Not in dn_phys, but should be. set it after taking a hold */
dmu_object_type_t dn_storage_type; /* type for storage class */
};
/*
* Since AVL already has embedded element counter, use dn_dbufs_count
* only for dbufs not counted there (bonus buffers) and just add them.
*/
#define DN_DBUFS_COUNT(dn) ((dn)->dn_dbufs_count + \
avl_numnodes(&(dn)->dn_dbufs))
/*
* We use this (otherwise unused) bit to indicate if the value of
* dn_next_maxblkid[txgoff] is valid to use in dnode_sync().
*/
#define DMU_NEXT_MAXBLKID_SET (1ULL << 63)
/*
* Adds a level of indirection between the dbuf and the dnode to avoid
* iterating descendent dbufs in dnode_move(). Handles are not allocated
* individually, but as an array of child dnodes in dnode_hold_impl().
*/
typedef struct dnode_handle {
/* Protects dnh_dnode from modification by dnode_move(). */
zrlock_t dnh_zrlock;
dnode_t *dnh_dnode;
} dnode_handle_t;
typedef struct dnode_children {
dmu_buf_user_t dnc_dbu; /* User evict data */
size_t dnc_count; /* number of children */
dnode_handle_t dnc_children[]; /* sized dynamically */
} dnode_children_t;
typedef struct free_range {
avl_node_t fr_node;
uint64_t fr_blkid;
uint64_t fr_nblks;
} free_range_t;
void dnode_special_open(struct objset *dd, dnode_phys_t *dnp,
uint64_t object, dnode_handle_t *dnh);
void dnode_special_close(dnode_handle_t *dnh);
void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);
int dnode_hold(struct objset *dd, uint64_t object,
const void *ref, dnode_t **dnp);
int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots,
const void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, const void *ref);
void dnode_rele(dnode_t *dn, const void *ref);
void dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting);
int dnode_try_claim(objset_t *os, uint64_t object, int slots);
boolean_t dnode_is_dirty(dnode_t *dn);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx);
void dnode_free(dnode_t *dn, dmu_tx_t *tx);
void dnode_byteswap(dnode_phys_t *dnp);
void dnode_buf_byteswap(void *buf, size_t size);
void dnode_verify(dnode_t *dn);
int dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx);
int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
void dnode_diduse_space(dnode_t *dn, int64_t space);
void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
boolean_t have_read, boolean_t force);
uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
void dnode_init(void);
void dnode_fini(void);
int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
int minlvl, uint64_t blkfill, uint64_t txg);
void dnode_evict_dbufs(dnode_t *dn);
void dnode_evict_bonus(dnode_t *dn);
void dnode_free_interior_slots(dnode_t *dn);
void dnode_set_storage_type(dnode_t *dn, dmu_object_type_t type);
#define DNODE_IS_DIRTY(_dn) \
((_dn)->dn_dirty_txg >= spa_syncing_txg((_dn)->dn_objset->os_spa))
#define DNODE_LEVEL_IS_CACHEABLE(_dn, _level) \
((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \
(((_level) > 0 || DMU_OT_IS_METADATA((_dn)->dn_type)) && \
(_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))
/*
* Used for dnodestats kstat.
*/
typedef struct dnode_stats {
/*
* Number of failed attempts to hold a meta dnode dbuf.
*/
kstat_named_t dnode_hold_dbuf_hold;
/*
* Number of failed attempts to read a meta dnode dbuf.
*/
kstat_named_t dnode_hold_dbuf_read;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was able
* to hold the requested object number which was allocated. This is
* the common case when looking up any allocated object number.
*/
kstat_named_t dnode_hold_alloc_hits;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
 * able to hold the requested object number because it was not allocated.
*/
kstat_named_t dnode_hold_alloc_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
 * able to hold the requested object number because the object number
* refers to an interior large dnode slot.
*/
kstat_named_t dnode_hold_alloc_interior;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) needed
* to retry acquiring slot zrl locks due to contention.
*/
kstat_named_t dnode_hold_alloc_lock_retry;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) did not
* need to create the dnode because another thread did so after
* dropping the read lock but before acquiring the write lock.
*/
kstat_named_t dnode_hold_alloc_lock_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) found
* a free dnode instantiated by dnode_create() but not yet allocated
* by dnode_allocate().
*/
kstat_named_t dnode_hold_alloc_type_none;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was able
* to hold the requested range of free dnode slots.
*/
kstat_named_t dnode_hold_free_hits;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
* able to hold the requested range of free dnode slots because
* at least one slot was allocated.
*/
kstat_named_t dnode_hold_free_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
* able to hold the requested range of free dnode slots because
* after acquiring the zrl lock at least one slot was allocated.
*/
kstat_named_t dnode_hold_free_lock_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) needed
* to retry acquiring slot zrl locks due to contention.
*/
kstat_named_t dnode_hold_free_lock_retry;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
* a range of dnode slots which were held by another thread.
*/
kstat_named_t dnode_hold_free_refcount;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
* a range of dnode slots which would overflow the dnode_phys_t.
*/
kstat_named_t dnode_hold_free_overflow;
/*
* Number of times dnode_free_interior_slots() needed to retry
* acquiring a slot zrl lock due to contention.
*/
kstat_named_t dnode_free_interior_lock_retry;
/*
* Number of new dnodes allocated by dnode_allocate().
*/
kstat_named_t dnode_allocate;
/*
* Number of dnodes re-allocated by dnode_reallocate().
*/
kstat_named_t dnode_reallocate;
/*
* Number of meta dnode dbufs evicted.
*/
kstat_named_t dnode_buf_evict;
/*
* Number of times dmu_object_alloc*() reached the end of the existing
* object ID chunk and advanced to a new one.
*/
kstat_named_t dnode_alloc_next_chunk;
/*
* Number of times multiple threads attempted to allocate a dnode
* from the same block of free dnodes.
*/
kstat_named_t dnode_alloc_race;
/*
* Number of times dmu_object_alloc*() was forced to advance to the
* next meta dnode dbuf due to an error from dmu_object_next().
*/
kstat_named_t dnode_alloc_next_block;
/*
* Statistics for tracking dnodes which have been moved.
*/
kstat_named_t dnode_move_invalid;
kstat_named_t dnode_move_recheck1;
kstat_named_t dnode_move_recheck2;
kstat_named_t dnode_move_special;
kstat_named_t dnode_move_handle;
kstat_named_t dnode_move_rwlock;
kstat_named_t dnode_move_active;
} dnode_stats_t;
typedef struct dnode_sums {
wmsum_t dnode_hold_dbuf_hold;
wmsum_t dnode_hold_dbuf_read;
wmsum_t dnode_hold_alloc_hits;
wmsum_t dnode_hold_alloc_misses;
wmsum_t dnode_hold_alloc_interior;
wmsum_t dnode_hold_alloc_lock_retry;
wmsum_t dnode_hold_alloc_lock_misses;
wmsum_t dnode_hold_alloc_type_none;
wmsum_t dnode_hold_free_hits;
wmsum_t dnode_hold_free_misses;
wmsum_t dnode_hold_free_lock_misses;
wmsum_t dnode_hold_free_lock_retry;
wmsum_t dnode_hold_free_refcount;
wmsum_t dnode_hold_free_overflow;
wmsum_t dnode_free_interior_lock_retry;
wmsum_t dnode_allocate;
wmsum_t dnode_reallocate;
wmsum_t dnode_buf_evict;
wmsum_t dnode_alloc_next_chunk;
wmsum_t dnode_alloc_race;
wmsum_t dnode_alloc_next_block;
wmsum_t dnode_move_invalid;
wmsum_t dnode_move_recheck1;
wmsum_t dnode_move_recheck2;
wmsum_t dnode_move_special;
wmsum_t dnode_move_handle;
wmsum_t dnode_move_rwlock;
wmsum_t dnode_move_active;
} dnode_sums_t;
extern dnode_stats_t dnode_stats;
extern dnode_sums_t dnode_sums;
#define DNODE_STAT_INCR(stat, val) \
wmsum_add(&dnode_sums.stat, (val))
#define DNODE_STAT_BUMP(stat) \
DNODE_STAT_INCR(stat, 1);
#ifdef ZFS_DEBUG
#define dprintf_dnode(dn, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char __db_buf[32]; \
uint64_t __db_obj = (dn)->dn_object; \
if (__db_obj == DMU_META_DNODE_OBJECT) \
(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf)); \
else \
(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
(u_longlong_t)__db_obj);\
dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \
__db_buf, __VA_ARGS__); \
} \
} while (0)
#define DNODE_VERIFY(dn) dnode_verify(dn)
#define FREE_VERIFY(db, start, end, tx) free_verify(db, start, end, tx)
#else
#define dprintf_dnode(db, fmt, ...)
#define DNODE_VERIFY(dn) ((void) sizeof ((uintptr_t)(dn)))
#define FREE_VERIFY(db, start, end, tx)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DNODE_H */
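As a usage note for the interfaces declared above, the hold/release discipline looks roughly like the sketch below (illustrative; FTAG is assumed to be the usual ZFS tag-by-function-name macro, and error handling is trimmed):

/* Illustrative sketch: read a field of a dnode while holding it. */
static int
example_get_bonuslen(objset_t *os, uint64_t object, uint16_t *lenp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	*lenp = dn->dn_bonuslen;
	dnode_rele(dn, FTAG);
	return (0);
}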
diff --git a/sys/contrib/openzfs/include/sys/metaslab.h b/sys/contrib/openzfs/include/sys/metaslab.h
index 815b5d0c9cf1..0171cd0fe0f8 100644
--- a/sys/contrib/openzfs/include/sys/metaslab.h
+++ b/sys/contrib/openzfs/include/sys/metaslab.h
@@ -1,149 +1,149 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#ifndef _SYS_METASLAB_H
#define _SYS_METASLAB_H
#include <sys/spa.h>
#include <sys/space_map.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/avl.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct metaslab_ops {
const char *msop_name;
uint64_t (*msop_alloc)(metaslab_t *, uint64_t);
} metaslab_ops_t;
extern const metaslab_ops_t zfs_metaslab_ops;
int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t,
metaslab_t **);
void metaslab_fini(metaslab_t *);
void metaslab_set_unflushed_dirty(metaslab_t *, boolean_t);
void metaslab_set_unflushed_txg(metaslab_t *, uint64_t, dmu_tx_t *);
void metaslab_set_estimated_condensed_size(metaslab_t *, uint64_t, dmu_tx_t *);
boolean_t metaslab_unflushed_dirty(metaslab_t *);
uint64_t metaslab_unflushed_txg(metaslab_t *);
uint64_t metaslab_estimated_condensed_size(metaslab_t *);
int metaslab_sort_by_flushed(const void *, const void *);
void metaslab_unflushed_bump(metaslab_t *, dmu_tx_t *, boolean_t);
uint64_t metaslab_unflushed_changes_memused(metaslab_t *);
int metaslab_load(metaslab_t *);
void metaslab_unload(metaslab_t *);
boolean_t metaslab_flush(metaslab_t *, dmu_tx_t *);
uint64_t metaslab_allocated_space(metaslab_t *);
void metaslab_sync(metaslab_t *, uint64_t);
void metaslab_sync_done(metaslab_t *, uint64_t);
void metaslab_sync_reassess(metaslab_group_t *);
uint64_t metaslab_largest_allocatable(metaslab_t *);
/*
* metaslab alloc flags
*/
#define METASLAB_HINTBP_FAVOR 0x0
#define METASLAB_HINTBP_AVOID 0x1
#define METASLAB_GANG_HEADER 0x2
#define METASLAB_GANG_CHILD 0x4
#define METASLAB_ASYNC_ALLOC 0x8
#define METASLAB_DONT_THROTTLE 0x10
#define METASLAB_MUST_RESERVE 0x20
#define METASLAB_ZIL 0x80
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *,
int);
int metaslab_alloc_dva(spa_t *, metaslab_class_t *, uint64_t,
dva_t *, int, dva_t *, uint64_t, int, zio_alloc_list_t *, int);
void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t);
void metaslab_free_concrete(vdev_t *, uint64_t, uint64_t, boolean_t);
void metaslab_free_dva(spa_t *, const dva_t *, boolean_t);
void metaslab_free_impl_cb(uint64_t, vdev_t *, uint64_t, uint64_t, void *);
void metaslab_unalloc_dva(spa_t *, const dva_t *, uint64_t);
int metaslab_claim(spa_t *, const blkptr_t *, uint64_t);
int metaslab_claim_impl(vdev_t *, uint64_t, uint64_t, uint64_t);
void metaslab_check_free(spa_t *, const blkptr_t *);
void metaslab_stat_init(void);
void metaslab_stat_fini(void);
void metaslab_trace_init(zio_alloc_list_t *);
void metaslab_trace_fini(zio_alloc_list_t *);
metaslab_class_t *metaslab_class_create(spa_t *, const metaslab_ops_t *);
void metaslab_class_destroy(metaslab_class_t *);
int metaslab_class_validate(metaslab_class_t *);
void metaslab_class_histogram_verify(metaslab_class_t *);
uint64_t metaslab_class_fragmentation(metaslab_class_t *);
uint64_t metaslab_class_expandable_space(metaslab_class_t *);
boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, int,
zio_t *, int);
void metaslab_class_throttle_unreserve(metaslab_class_t *, int, int, zio_t *);
void metaslab_class_evict_old(metaslab_class_t *, uint64_t);
uint64_t metaslab_class_get_alloc(metaslab_class_t *);
uint64_t metaslab_class_get_space(metaslab_class_t *);
uint64_t metaslab_class_get_dspace(metaslab_class_t *);
uint64_t metaslab_class_get_deferred(metaslab_class_t *);
void metaslab_space_update(vdev_t *, metaslab_class_t *,
int64_t, int64_t, int64_t);
metaslab_group_t *metaslab_group_create(metaslab_class_t *, vdev_t *, int);
void metaslab_group_destroy(metaslab_group_t *);
void metaslab_group_activate(metaslab_group_t *);
void metaslab_group_passivate(metaslab_group_t *);
boolean_t metaslab_group_initialized(metaslab_group_t *);
uint64_t metaslab_group_get_space(metaslab_group_t *);
void metaslab_group_histogram_verify(metaslab_group_t *);
uint64_t metaslab_group_fragmentation(metaslab_group_t *);
void metaslab_group_histogram_remove(metaslab_group_t *, metaslab_t *);
void metaslab_group_alloc_decrement(spa_t *, uint64_t, const void *, int, int,
boolean_t);
void metaslab_group_alloc_verify(spa_t *, const blkptr_t *, const void *, int);
void metaslab_recalculate_weight_and_sort(metaslab_t *);
void metaslab_disable(metaslab_t *);
void metaslab_enable(metaslab_t *, boolean_t, boolean_t);
void metaslab_set_selected_txg(metaslab_t *, uint64_t);
extern int metaslab_debug_load;
-range_seg_type_t metaslab_calculate_range_tree_type(vdev_t *vdev,
+zfs_range_seg_type_t metaslab_calculate_range_tree_type(vdev_t *vdev,
metaslab_t *msp, uint64_t *start, uint64_t *shift);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_METASLAB_H */
diff --git a/sys/contrib/openzfs/include/sys/metaslab_impl.h b/sys/contrib/openzfs/include/sys/metaslab_impl.h
index 4f434291ddbf..9c35f27ff0b4 100644
--- a/sys/contrib/openzfs/include/sys/metaslab_impl.h
+++ b/sys/contrib/openzfs/include/sys/metaslab_impl.h
@@ -1,572 +1,574 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_METASLAB_IMPL_H
#define _SYS_METASLAB_IMPL_H
#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Metaslab allocation tracing record.
*/
typedef struct metaslab_alloc_trace {
list_node_t mat_list_node;
metaslab_group_t *mat_mg;
metaslab_t *mat_msp;
uint64_t mat_size;
uint64_t mat_weight;
uint32_t mat_dva_id;
uint64_t mat_offset;
int mat_allocator;
} metaslab_alloc_trace_t;
/*
* Used by the metaslab allocation tracing facility to indicate
* error conditions. These errors are stored to the offset member
* of the metaslab_alloc_trace_t record and displayed by mdb.
*/
typedef enum trace_alloc_type {
TRACE_ALLOC_FAILURE = -1ULL,
TRACE_TOO_SMALL = -2ULL,
TRACE_FORCE_GANG = -3ULL,
TRACE_NOT_ALLOCATABLE = -4ULL,
TRACE_GROUP_FAILURE = -5ULL,
TRACE_ENOSPC = -6ULL,
TRACE_CONDENSING = -7ULL,
TRACE_VDEV_ERROR = -8ULL,
TRACE_DISABLED = -9ULL,
} trace_alloc_type_t;
#define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
#define METASLAB_WEIGHT_CLAIM (1ULL << 61)
#define METASLAB_WEIGHT_TYPE (1ULL << 60)
#define METASLAB_ACTIVE_MASK \
(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
METASLAB_WEIGHT_CLAIM)
/*
* The metaslab weight is used to encode the amount of free space in a
* metaslab, such that the "best" metaslab appears first when sorting the
* metaslabs by weight. The weight (and therefore the "best" metaslab) can
* be determined in two different ways: by computing a weighted sum of all
* the free space in the metaslab (a space based weight) or by counting only
* the free segments of the largest size (a segment based weight). We prefer
* the segment based weight because it reflects how the free space is
* comprised, but we cannot always use it -- legacy pools do not have the
* space map histogram information necessary to determine the largest
* contiguous regions. Pools that have the space map histogram determine
* the segment weight by looking at each bucket in the histogram and
* determining the free space whose size in bytes is in the range:
* [2^i, 2^(i+1))
* We then encode the largest index, i, that contains regions into the
* segment-weighted value.
*
* Space-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PSC1| weighted-free space |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* C - indicates activation for claimed block zio
* space - the fragmentation-weighted space
*
* Segment-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PSC0| idx| count of segments in region |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* C - indicates activation for claimed block zio
* idx - index for the highest bucket in the histogram
* count - number of segments in the specified bucket
*/
#define WEIGHT_GET_ACTIVE(weight) BF64_GET((weight), 61, 3)
#define WEIGHT_SET_ACTIVE(weight, x) BF64_SET((weight), 61, 3, x)
#define WEIGHT_IS_SPACEBASED(weight) \
((weight) == 0 || BF64_GET((weight), 60, 1))
#define WEIGHT_SET_SPACEBASED(weight) BF64_SET((weight), 60, 1, 1)
/*
* These macros are only applicable to segment-based weighting.
*/
#define WEIGHT_GET_INDEX(weight) BF64_GET((weight), 54, 6)
#define WEIGHT_SET_INDEX(weight, x) BF64_SET((weight), 54, 6, x)
#define WEIGHT_GET_COUNT(weight) BF64_GET((weight), 0, 54)
#define WEIGHT_SET_COUNT(weight, x) BF64_SET((weight), 0, 54, x)
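/*
 * Illustrative decoding example (not part of the format definition): for
 * a segment-based weight built as
 *
 *     uint64_t w = METASLAB_WEIGHT_PRIMARY;
 *     WEIGHT_SET_INDEX(w, 30);   (largest free segments in [2^30, 2^31))
 *     WEIGHT_SET_COUNT(w, 12);   (twelve such segments)
 *
 * WEIGHT_IS_SPACEBASED(w) is false, WEIGHT_GET_INDEX(w) returns 30, and
 * WEIGHT_GET_COUNT(w) returns 12, so sorting metaslabs by the raw weight
 * value prefers those with larger contiguous free segments.
 */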
/*
* Per-allocator data structure.
*/
typedef struct metaslab_class_allocator {
metaslab_group_t *mca_rotor;
uint64_t mca_aliquot;
/*
* The allocation throttle works on a reservation system. Whenever
* an asynchronous zio wants to perform an allocation it must
* first reserve the number of blocks that it wants to allocate.
* If there aren't sufficient slots available for the pending zio
* then that I/O is throttled until more slots free up. The current
* number of reserved allocations is maintained by the mca_alloc_slots
* refcount. The mca_alloc_max_slots value determines the maximum
* number of allocations that the system allows. Gang blocks are
* allowed to reserve slots even if we've reached the maximum
* number of allocations allowed.
*/
uint64_t mca_alloc_max_slots;
zfs_refcount_t mca_alloc_slots;
} ____cacheline_aligned metaslab_class_allocator_t;
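/*
 * Illustrative flow (simplified): before an asynchronous zio allocates,
 * it reserves slots here via metaslab_class_throttle_reserve(); if
 * mca_alloc_slots has already reached mca_alloc_max_slots, the allocation
 * is throttled until metaslab_class_throttle_unreserve() releases slots
 * as outstanding I/Os complete.  Gang block allocations may still reserve
 * slots past the maximum, as noted above.
 */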
/*
* A metaslab class encompasses a category of allocatable top-level vdevs.
* Each top-level vdev is associated with a metaslab group which defines
* the allocatable region for that vdev. Examples of these categories include
* "normal" for data block allocations (i.e. main pool allocations) or "log"
* for allocations designated for intent log devices (i.e. slog devices).
* When a block allocation is requested from the SPA it is associated with a
* metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
* to the class can be used to satisfy that request. Allocations are done
* by traversing the metaslab groups that are linked off of the mca_rotor field.
* This rotor points to the next metaslab group where allocations will be
* attempted. Allocating a block is a 3 step process -- select the metaslab
* group, select the metaslab, and then allocate the block. The metaslab
* class defines the low-level block allocator that will be used as the
* final step in allocation. These allocators are pluggable allowing each class
* to use a block allocator that best suits that class.
*/
struct metaslab_class {
kmutex_t mc_lock;
spa_t *mc_spa;
const metaslab_ops_t *mc_ops;
/*
* Track the number of metaslab groups that have been initialized
* and can accept allocations. An initialized metaslab group is
 * one that has been completely added to the config (i.e. we have
* updated the MOS config and the space has been added to the pool).
*/
uint64_t mc_groups;
/*
* Toggle to enable/disable the allocation throttle.
*/
boolean_t mc_alloc_throttle_enabled;
uint64_t mc_alloc_groups; /* # of allocatable groups */
uint64_t mc_alloc; /* total allocated space */
uint64_t mc_deferred; /* total deferred frees */
uint64_t mc_space; /* total space (alloc + free) */
uint64_t mc_dspace; /* total deflated space */
- uint64_t mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
+ uint64_t mc_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];
/*
* List of all loaded metaslabs in the class, sorted in order of most
* recent use.
*/
multilist_t mc_metaslab_txg_list;
metaslab_class_allocator_t mc_allocator[];
};
/*
* Per-allocator data structure.
*/
typedef struct metaslab_group_allocator {
uint64_t mga_cur_max_alloc_queue_depth;
zfs_refcount_t mga_alloc_queue_depth;
metaslab_t *mga_primary;
metaslab_t *mga_secondary;
} metaslab_group_allocator_t;
/*
* Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
* of a top-level vdev. They are linked together to form a circular linked
* list and can belong to only one metaslab class. Metaslab groups may become
* ineligible for allocations for a number of reasons such as limited free
* space, fragmentation, or going offline. When this happens the allocator will
* simply find the next metaslab group in the linked list and attempt
* to allocate from that group instead.
*/
struct metaslab_group {
kmutex_t mg_lock;
avl_tree_t mg_metaslab_tree;
uint64_t mg_aliquot;
boolean_t mg_allocatable; /* can we allocate? */
uint64_t mg_ms_ready;
/*
* A metaslab group is considered to be initialized only after
* we have updated the MOS config and added the space to the pool.
* We only allow allocation attempts to a metaslab group if it
* has been initialized.
*/
boolean_t mg_initialized;
uint64_t mg_free_capacity; /* percentage free */
int64_t mg_bias;
int64_t mg_activation_count;
metaslab_class_t *mg_class;
vdev_t *mg_vd;
metaslab_group_t *mg_prev;
metaslab_group_t *mg_next;
/*
* In order for the allocation throttle to function properly, we cannot
* have too many IOs going to each disk by default; the throttle
* operates by allocating more work to disks that finish quickly, so
* allocating larger chunks to each disk reduces its effectiveness.
* However, if the number of IOs going to each allocator is too small,
* we will not perform proper aggregation at the vdev_queue layer,
* also resulting in decreased performance. Therefore, we will use a
* ramp-up strategy.
*
* Each allocator in each metaslab group has a current queue depth
* (mg_alloc_queue_depth[allocator]) and a current max queue depth
* (mga_cur_max_alloc_queue_depth[allocator]), and each metaslab group
* has an absolute max queue depth (mg_max_alloc_queue_depth). We
* add IOs to an allocator until the mg_alloc_queue_depth for that
* allocator hits the cur_max. Every time an IO completes for a given
* allocator on a given metaslab group, we increment its cur_max until
* it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
* help protect against disks that decrease in performance over time.
*
* It's possible for an allocator to handle more allocations than
* its max. This can occur when gang blocks are required or when other
* groups are unable to handle their share of allocations.
*/
uint64_t mg_max_alloc_queue_depth;
/*
 * A metaslab group that can no longer allocate the minimum block
* size will set mg_no_free_space. Once a metaslab group is out
* of space then its share of work must be distributed to other
* groups.
*/
boolean_t mg_no_free_space;
uint64_t mg_allocations;
uint64_t mg_failed_allocations;
uint64_t mg_fragmentation;
- uint64_t mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
+ uint64_t mg_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];
int mg_ms_disabled;
boolean_t mg_disabled_updating;
kmutex_t mg_ms_disabled_lock;
kcondvar_t mg_ms_disabled_cv;
int mg_allocators;
metaslab_group_allocator_t mg_allocator[];
};
/*
* This value defines the number of elements in the ms_lbas array. The value
* of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
* This is the equivalent of highbit(UINT64_MAX).
*/
#define MAX_LBAS 64
/*
* Each metaslab maintains a set of in-core trees to track metaslab
* operations. The in-core free tree (ms_allocatable) contains the list of
* free segments which are eligible for allocation. As blocks are
* allocated, the allocated segments are removed from the ms_allocatable and
* added to a per txg allocation tree (ms_allocating). As blocks are
* freed, they are added to the free tree (ms_freeing). These trees
* allow us to process all allocations and frees in syncing context
* where it is safe to update the on-disk space maps. An additional set
* of in-core trees is maintained to track deferred frees
* (ms_defer). Once a block is freed it will move from the
* ms_freed to the ms_defer tree. A deferred free means that a block
* has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later. For example, a block that is freed in txg
* 50 will not be available for reallocation until txg 52 (50 +
* TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
 * A pool could be safely rolled back TXG_DEFER_SIZE transaction
* groups and ensure that no block has been reallocated.
*
* The simplified transition diagram looks like this:
*
*
* ALLOCATE
* |
* V
* free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map)
* ^
* | ms_freeing <--- FREE
* | |
* | v
* | ms_freed
* | |
* +-------- ms_defer[2] <-------+-------> (write to space map)
*
*
* Each metaslab's space is tracked in a single space map in the MOS,
* which is only updated in syncing context. Each time we sync a txg,
* we append the allocs and frees from that txg to the space map. The
* pool space is only updated once all metaslabs have finished syncing.
*
* To load the in-core free tree we read the space map from disk. This
* object contains a series of alloc and free records that are combined
* to make up the list of all free segments in this metaslab. These
* segments are represented in-core by the ms_allocatable and are stored
* in an AVL tree.
*
* As the space map grows (as a result of the appends) it will
* eventually become space-inefficient. When the metaslab's in-core
* free tree is zfs_condense_pct/100 times the size of the minimal
* on-disk representation, we rewrite it in its minimized form. If a
* metaslab needs to condense then we must set the ms_condensing flag to
* ensure that allocations are not performed on the metaslab that is
* being written.
*/
struct metaslab {
/*
* This is the main lock of the metaslab and its purpose is to
* coordinate our allocations and frees [e.g., metaslab_block_alloc(),
* metaslab_free_concrete(), ..etc] with our various syncing
* procedures [e.g., metaslab_sync(), metaslab_sync_done(), ..etc].
*
* The lock is also used during some miscellaneous operations like
* using the metaslab's histogram for the metaslab group's histogram
* aggregation, or marking the metaslab for initialization.
*/
kmutex_t ms_lock;
/*
* Acquired together with the ms_lock whenever we expect to
* write to metaslab data on-disk (i.e flushing entries to
* the metaslab's space map). It helps coordinate readers of
* the metaslab's space map [see spa_vdev_remove_thread()]
* with writers [see metaslab_sync() or metaslab_flush()].
*
* Note that metaslab_load(), even though a reader, uses
* a completely different mechanism to deal with the reading
* of the metaslab's space map based on ms_synced_length. That
* said, the function still uses the ms_sync_lock after it
* has read the ms_sm [see relevant comment in metaslab_load()
* as to why].
*/
kmutex_t ms_sync_lock;
kcondvar_t ms_load_cv;
space_map_t *ms_sm;
uint64_t ms_id;
uint64_t ms_start;
uint64_t ms_size;
uint64_t ms_fragmentation;
- range_tree_t *ms_allocating[TXG_SIZE];
- range_tree_t *ms_allocatable;
+ zfs_range_tree_t *ms_allocating[TXG_SIZE];
+ zfs_range_tree_t *ms_allocatable;
uint64_t ms_allocated_this_txg;
uint64_t ms_allocating_total;
/*
* The following range trees are accessed only from syncing context.
* ms_free*tree only have entries while syncing, and are empty
* between syncs.
*/
- range_tree_t *ms_freeing; /* to free this syncing txg */
- range_tree_t *ms_freed; /* already freed this syncing txg */
- range_tree_t *ms_defer[TXG_DEFER_SIZE];
- range_tree_t *ms_checkpointing; /* to add to the checkpoint */
+ zfs_range_tree_t *ms_freeing; /* to free this syncing txg */
+ /* already freed this syncing txg */
+ zfs_range_tree_t *ms_freed;
+ zfs_range_tree_t *ms_defer[TXG_DEFER_SIZE];
+ /* to add to the checkpoint */
+ zfs_range_tree_t *ms_checkpointing;
/*
* The ms_trim tree is the set of allocatable segments which are
* eligible for trimming. (When the metaslab is loaded, it's a
* subset of ms_allocatable.) It's kept in-core as long as the
* autotrim property is set and is not vacated when the metaslab
* is unloaded. Its purpose is to aggregate freed ranges to
* facilitate efficient trimming.
*/
- range_tree_t *ms_trim;
+ zfs_range_tree_t *ms_trim;
boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted;
/*
* The number of consumers which have disabled the metaslab.
*/
uint64_t ms_disabled;
/*
* We must always hold the ms_lock when modifying ms_loaded
* and ms_loading.
*/
boolean_t ms_loaded;
boolean_t ms_loading;
kcondvar_t ms_flush_cv;
boolean_t ms_flushing;
/*
* The following histograms count entries that are in the
* metaslab's space map (and its histogram) but are not in
* ms_allocatable yet, because they are in ms_freed, ms_freeing,
* or ms_defer[].
*
* When the metaslab is not loaded, its ms_weight needs to
* reflect what is allocatable (i.e. what will be part of
* ms_allocatable if it is loaded). The weight is computed from
* the spacemap histogram, but that includes ranges that are
* not yet allocatable (because they are in ms_freed,
* ms_freeing, or ms_defer[]). Therefore, when calculating the
* weight, we need to remove those ranges.
*
* The ranges in the ms_freed and ms_defer[] range trees are all
* present in the spacemap. However, the spacemap may have
* multiple entries to represent a contiguous range, because it
* is written across multiple sync passes, but the changes of
* all sync passes are consolidated into the range trees.
* Adjacent ranges that are freed in different sync passes of
* one txg will be represented separately (as 2 or more entries)
* in the space map (and its histogram), but these adjacent
* ranges will be consolidated (represented as one entry) in the
* ms_freed/ms_defer[] range trees (and their histograms).
*
* When calculating the weight, we cannot simply subtract the
* range trees' histograms from the spacemap's histogram,
* because the range trees' histograms may have entries in
* higher buckets than the spacemap, due to consolidation.
* Instead we must subtract the exact entries that were added to
* the spacemap's histogram. ms_synchist and ms_deferhist[]
* represent these exact entries, so we can subtract them from
* the spacemap's histogram when calculating ms_weight.
*
* ms_synchist represents the same ranges as ms_freeing +
* ms_freed, but without consolidation across sync passes.
*
* ms_deferhist[i] represents the same ranges as ms_defer[i],
* but without consolidation across sync passes.
*/
uint64_t ms_synchist[SPACE_MAP_HISTOGRAM_SIZE];
uint64_t ms_deferhist[TXG_DEFER_SIZE][SPACE_MAP_HISTOGRAM_SIZE];
/*
* Tracks the exact amount of allocated space of this metaslab
* (and specifically the metaslab's space map) up to the most
* recently completed sync pass [see usage in metaslab_sync()].
*/
uint64_t ms_allocated_space;
int64_t ms_deferspace; /* sum of ms_defermap[] space */
uint64_t ms_weight; /* weight vs. others in group */
uint64_t ms_activation_weight; /* activation weight */
/*
* Tracks whenever a metaslab is selected for loading or allocation.
* We use this value to determine how long the metaslab should
* stay cached.
*/
uint64_t ms_selected_txg;
/*
* ms_load/unload_time can be used for performance monitoring
* (e.g. by dtrace or mdb).
*/
hrtime_t ms_load_time; /* time last loaded */
hrtime_t ms_unload_time; /* time last unloaded */
hrtime_t ms_selected_time; /* time last allocated from */
uint64_t ms_alloc_txg; /* last successful alloc (debug only) */
uint64_t ms_max_size; /* maximum allocatable size */
/*
* -1 if it's not active in an allocator, otherwise set to the allocator
* this metaslab is active for.
*/
int ms_allocator;
boolean_t ms_primary; /* Only valid if ms_allocator is not -1 */
/*
* The metaslab block allocators can optionally use a size-ordered
* range tree and/or an array of LBAs. Not all allocators use
* this functionality. The ms_allocatable_by_size should always
* contain the same number of segments as the ms_allocatable. The
* only difference is that the ms_allocatable_by_size is ordered by
* segment sizes.
*/
zfs_btree_t ms_allocatable_by_size;
zfs_btree_t ms_unflushed_frees_by_size;
uint64_t ms_lbas[MAX_LBAS];
metaslab_group_t *ms_group; /* metaslab group */
avl_node_t ms_group_node; /* node in metaslab group tree */
txg_node_t ms_txg_node; /* per-txg dirty metaslab links */
avl_node_t ms_spa_txg_node; /* node in spa_metaslabs_by_txg */
/*
* Node in metaslab class's selected txg list
*/
multilist_node_t ms_class_txg_node;
/*
* Allocs and frees that are committed to the vdev log spacemap but
* not yet to this metaslab's spacemap.
*/
- range_tree_t *ms_unflushed_allocs;
- range_tree_t *ms_unflushed_frees;
+ zfs_range_tree_t *ms_unflushed_allocs;
+ zfs_range_tree_t *ms_unflushed_frees;
/*
* We have flushed entries up to but not including this TXG. In
* other words, all changes from this TXG and onward should not
* be in this metaslab's space map and must be read from the
* log space maps.
*/
uint64_t ms_unflushed_txg;
boolean_t ms_unflushed_dirty;
/* updated every time we are done syncing the metaslab's space map */
uint64_t ms_synced_length;
boolean_t ms_new;
};
typedef struct metaslab_unflushed_phys {
/* on-disk counterpart of ms_unflushed_txg */
uint64_t msp_unflushed_txg;
} metaslab_unflushed_phys_t;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_METASLAB_IMPL_H */
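For readers new to the rename, here is a minimal illustrative sketch (not part of the patch) of driving the zfs_range_tree_t API declared in the updated sys/range_tree.h below. It assumes a libzpool/kernel build context where <sys/zfs_context.h> provides ASSERT()/ASSERT3U(); the tree is created with no ops vector and no unit shift, and the function name is hypothetical.

#include <sys/zfs_context.h>
#include <sys/range_tree.h>

static void
demo_zfs_range_tree(void)
{
        /* 64-bit segments, no ops vector, origin 0, no unit shift. */
        zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
            NULL, 0, 0);

        zfs_range_tree_add(rt, 0x1000, 0x2000);  /* covers [0x1000, 0x3000) */
        zfs_range_tree_add(rt, 0x3000, 0x1000);  /* coalesces to [0x1000, 0x4000) */
        ASSERT(zfs_range_tree_contains(rt, 0x1800, 0x800));
        ASSERT3U(zfs_range_tree_space(rt), ==, 0x3000);

        /* Removing from the middle splits the segment in two. */
        zfs_range_tree_remove(rt, 0x2000, 0x1000);
        ASSERT3U(zfs_range_tree_space(rt), ==, 0x2000);

        /* The tree must be emptied before it can be destroyed. */
        zfs_range_tree_vacate(rt, NULL, NULL);
        ASSERT(zfs_range_tree_is_empty(rt));
        zfs_range_tree_destroy(rt);
}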
diff --git a/sys/contrib/openzfs/include/sys/range_tree.h b/sys/contrib/openzfs/include/sys/range_tree.h
index d6f60e795288..23eea3210c98 100644
--- a/sys/contrib/openzfs/include/sys/range_tree.h
+++ b/sys/contrib/openzfs/include/sys/range_tree.h
@@ -1,319 +1,326 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_RANGE_TREE_H
#define _SYS_RANGE_TREE_H
#include <sys/btree.h>
#include <sys/dmu.h>
#ifdef __cplusplus
extern "C" {
#endif
-#define RANGE_TREE_HISTOGRAM_SIZE 64
+#define ZFS_RANGE_TREE_HISTOGRAM_SIZE 64
-typedef struct range_tree_ops range_tree_ops_t;
+typedef struct zfs_range_tree_ops zfs_range_tree_ops_t;
-typedef enum range_seg_type {
- RANGE_SEG32,
- RANGE_SEG64,
- RANGE_SEG_GAP,
- RANGE_SEG_NUM_TYPES,
-} range_seg_type_t;
+typedef enum zfs_range_seg_type {
+ ZFS_RANGE_SEG32,
+ ZFS_RANGE_SEG64,
+ ZFS_RANGE_SEG_GAP,
+ ZFS_RANGE_SEG_NUM_TYPES,
+} zfs_range_seg_type_t;
/*
* Note: the range_tree may not be accessed concurrently; consumers
* must provide external locking if required.
*/
-typedef struct range_tree {
+typedef struct zfs_range_tree {
zfs_btree_t rt_root; /* offset-ordered segment b-tree */
uint64_t rt_space; /* sum of all segments in the map */
- range_seg_type_t rt_type; /* type of range_seg_t in use */
+ zfs_range_seg_type_t rt_type; /* type of zfs_range_seg_t in use */
/*
* All data that is stored in the range tree must have a start higher
* than or equal to rt_start, and all sizes and offsets must be
* multiples of 1 << rt_shift.
*/
uint8_t rt_shift;
uint64_t rt_start;
- const range_tree_ops_t *rt_ops;
+ const zfs_range_tree_ops_t *rt_ops;
void *rt_arg;
uint64_t rt_gap; /* allowable inter-segment gap */
/*
* The rt_histogram maintains a histogram of ranges. Each bucket,
* rt_histogram[i], contains the number of ranges whose size is:
* 2^i <= size of range in bytes < 2^(i+1)
*/
- uint64_t rt_histogram[RANGE_TREE_HISTOGRAM_SIZE];
-} range_tree_t;
+ uint64_t rt_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];
+} zfs_range_tree_t;
-typedef struct range_seg32 {
+typedef struct zfs_range_seg32 {
uint32_t rs_start; /* starting offset of this segment */
uint32_t rs_end; /* ending offset (non-inclusive) */
-} range_seg32_t;
+} zfs_range_seg32_t;
/*
* Extremely large metaslabs, vdev-wide trees, and dnode-wide trees may
* require 64-bit integers for ranges.
*/
-typedef struct range_seg64 {
+typedef struct zfs_range_seg64 {
uint64_t rs_start; /* starting offset of this segment */
uint64_t rs_end; /* ending offset (non-inclusive) */
-} range_seg64_t;
+} zfs_range_seg64_t;
-typedef struct range_seg_gap {
+typedef struct zfs_range_seg_gap {
uint64_t rs_start; /* starting offset of this segment */
uint64_t rs_end; /* ending offset (non-inclusive) */
uint64_t rs_fill; /* actual fill if gap mode is on */
-} range_seg_gap_t;
+} zfs_range_seg_gap_t;
/*
* This type needs to be the largest of the range segs, since it will be stack
* allocated and then cast the actual type to do tree operations.
*/
-typedef range_seg_gap_t range_seg_max_t;
+typedef zfs_range_seg_gap_t zfs_range_seg_max_t;
/*
* This is just for clarity of code purposes, so we can make it clear that a
* pointer is to a range seg of some type; when we need to do the actual math,
* we'll figure out the real type.
*/
-typedef void range_seg_t;
-
-struct range_tree_ops {
- void (*rtop_create)(range_tree_t *rt, void *arg);
- void (*rtop_destroy)(range_tree_t *rt, void *arg);
- void (*rtop_add)(range_tree_t *rt, void *rs, void *arg);
- void (*rtop_remove)(range_tree_t *rt, void *rs, void *arg);
- void (*rtop_vacate)(range_tree_t *rt, void *arg);
+typedef void zfs_range_seg_t;
+
+struct zfs_range_tree_ops {
+ void (*rtop_create)(zfs_range_tree_t *rt, void *arg);
+ void (*rtop_destroy)(zfs_range_tree_t *rt, void *arg);
+ void (*rtop_add)(zfs_range_tree_t *rt, void *rs, void *arg);
+ void (*rtop_remove)(zfs_range_tree_t *rt, void *rs, void *arg);
+ void (*rtop_vacate)(zfs_range_tree_t *rt, void *arg);
};
static inline uint64_t
-rs_get_start_raw(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_start_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
- case RANGE_SEG32:
- return (((const range_seg32_t *)rs)->rs_start);
- case RANGE_SEG64:
- return (((const range_seg64_t *)rs)->rs_start);
- case RANGE_SEG_GAP:
- return (((const range_seg_gap_t *)rs)->rs_start);
+ case ZFS_RANGE_SEG32:
+ return (((const zfs_range_seg32_t *)rs)->rs_start);
+ case ZFS_RANGE_SEG64:
+ return (((const zfs_range_seg64_t *)rs)->rs_start);
+ case ZFS_RANGE_SEG_GAP:
+ return (((const zfs_range_seg_gap_t *)rs)->rs_start);
default:
VERIFY(0);
return (0);
}
}
static inline uint64_t
-rs_get_end_raw(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_end_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
- case RANGE_SEG32:
- return (((const range_seg32_t *)rs)->rs_end);
- case RANGE_SEG64:
- return (((const range_seg64_t *)rs)->rs_end);
- case RANGE_SEG_GAP:
- return (((const range_seg_gap_t *)rs)->rs_end);
+ case ZFS_RANGE_SEG32:
+ return (((const zfs_range_seg32_t *)rs)->rs_end);
+ case ZFS_RANGE_SEG64:
+ return (((const zfs_range_seg64_t *)rs)->rs_end);
+ case ZFS_RANGE_SEG_GAP:
+ return (((const zfs_range_seg_gap_t *)rs)->rs_end);
default:
VERIFY(0);
return (0);
}
}
static inline uint64_t
-rs_get_fill_raw(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_fill_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
- case RANGE_SEG32: {
- const range_seg32_t *r32 = (const range_seg32_t *)rs;
+ case ZFS_RANGE_SEG32: {
+ const zfs_range_seg32_t *r32 = (const zfs_range_seg32_t *)rs;
return (r32->rs_end - r32->rs_start);
}
- case RANGE_SEG64: {
- const range_seg64_t *r64 = (const range_seg64_t *)rs;
+ case ZFS_RANGE_SEG64: {
+ const zfs_range_seg64_t *r64 = (const zfs_range_seg64_t *)rs;
return (r64->rs_end - r64->rs_start);
}
- case RANGE_SEG_GAP:
- return (((const range_seg_gap_t *)rs)->rs_fill);
+ case ZFS_RANGE_SEG_GAP:
+ return (((const zfs_range_seg_gap_t *)rs)->rs_fill);
default:
VERIFY(0);
return (0);
}
}
static inline uint64_t
-rs_get_start(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_start(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
{
- return ((rs_get_start_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
+ return ((zfs_rs_get_start_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
}
static inline uint64_t
-rs_get_end(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_end(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
{
- return ((rs_get_end_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
+ return ((zfs_rs_get_end_raw(rs, rt) << rt->rt_shift) + rt->rt_start);
}
static inline uint64_t
-rs_get_fill(const range_seg_t *rs, const range_tree_t *rt)
+zfs_rs_get_fill(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
{
- return (rs_get_fill_raw(rs, rt) << rt->rt_shift);
+ return (zfs_rs_get_fill_raw(rs, rt) << rt->rt_shift);
}
static inline void
-rs_set_start_raw(range_seg_t *rs, range_tree_t *rt, uint64_t start)
+zfs_rs_set_start_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t start)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
- case RANGE_SEG32:
+ case ZFS_RANGE_SEG32:
ASSERT3U(start, <=, UINT32_MAX);
- ((range_seg32_t *)rs)->rs_start = (uint32_t)start;
+ ((zfs_range_seg32_t *)rs)->rs_start = (uint32_t)start;
break;
- case RANGE_SEG64:
- ((range_seg64_t *)rs)->rs_start = start;
+ case ZFS_RANGE_SEG64:
+ ((zfs_range_seg64_t *)rs)->rs_start = start;
break;
- case RANGE_SEG_GAP:
- ((range_seg_gap_t *)rs)->rs_start = start;
+ case ZFS_RANGE_SEG_GAP:
+ ((zfs_range_seg_gap_t *)rs)->rs_start = start;
break;
default:
VERIFY(0);
}
}
static inline void
-rs_set_end_raw(range_seg_t *rs, range_tree_t *rt, uint64_t end)
+zfs_rs_set_end_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
- case RANGE_SEG32:
+ case ZFS_RANGE_SEG32:
ASSERT3U(end, <=, UINT32_MAX);
- ((range_seg32_t *)rs)->rs_end = (uint32_t)end;
+ ((zfs_range_seg32_t *)rs)->rs_end = (uint32_t)end;
break;
- case RANGE_SEG64:
- ((range_seg64_t *)rs)->rs_end = end;
+ case ZFS_RANGE_SEG64:
+ ((zfs_range_seg64_t *)rs)->rs_end = end;
break;
- case RANGE_SEG_GAP:
- ((range_seg_gap_t *)rs)->rs_end = end;
+ case ZFS_RANGE_SEG_GAP:
+ ((zfs_range_seg_gap_t *)rs)->rs_end = end;
break;
default:
VERIFY(0);
}
}
static inline void
-rs_set_fill_raw(range_seg_t *rs, range_tree_t *rt, uint64_t fill)
+zfs_zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt,
+ uint64_t fill)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
- case RANGE_SEG32:
+ case ZFS_RANGE_SEG32:
/* fall through */
- case RANGE_SEG64:
- ASSERT3U(fill, ==, rs_get_end_raw(rs, rt) - rs_get_start_raw(rs,
- rt));
+ case ZFS_RANGE_SEG64:
+ ASSERT3U(fill, ==, zfs_rs_get_end_raw(rs, rt) -
+ zfs_rs_get_start_raw(rs, rt));
break;
- case RANGE_SEG_GAP:
- ((range_seg_gap_t *)rs)->rs_fill = fill;
+ case ZFS_RANGE_SEG_GAP:
+ ((zfs_range_seg_gap_t *)rs)->rs_fill = fill;
break;
default:
VERIFY(0);
}
}
static inline void
-rs_set_start(range_seg_t *rs, range_tree_t *rt, uint64_t start)
+zfs_rs_set_start(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t start)
{
ASSERT3U(start, >=, rt->rt_start);
ASSERT(IS_P2ALIGNED(start, 1ULL << rt->rt_shift));
- rs_set_start_raw(rs, rt, (start - rt->rt_start) >> rt->rt_shift);
+ zfs_rs_set_start_raw(rs, rt, (start - rt->rt_start) >> rt->rt_shift);
}
static inline void
-rs_set_end(range_seg_t *rs, range_tree_t *rt, uint64_t end)
+zfs_rs_set_end(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
{
ASSERT3U(end, >=, rt->rt_start);
ASSERT(IS_P2ALIGNED(end, 1ULL << rt->rt_shift));
- rs_set_end_raw(rs, rt, (end - rt->rt_start) >> rt->rt_shift);
+ zfs_rs_set_end_raw(rs, rt, (end - rt->rt_start) >> rt->rt_shift);
}
static inline void
-rs_set_fill(range_seg_t *rs, range_tree_t *rt, uint64_t fill)
+zfs_rs_set_fill(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t fill)
{
ASSERT(IS_P2ALIGNED(fill, 1ULL << rt->rt_shift));
- rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
+ zfs_zfs_rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
}
-typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size);
+typedef void zfs_range_tree_func_t(void *arg, uint64_t start, uint64_t size);
-range_tree_t *range_tree_create_gap(const range_tree_ops_t *ops,
- range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
+zfs_range_tree_t *zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
+ zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
uint64_t gap);
-range_tree_t *range_tree_create(const range_tree_ops_t *ops,
- range_seg_type_t type, void *arg, uint64_t start, uint64_t shift);
-void range_tree_destroy(range_tree_t *rt);
-boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size);
-range_seg_t *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size);
-boolean_t range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
- uint64_t *ostart, uint64_t *osize);
-void range_tree_verify_not_present(range_tree_t *rt,
+zfs_range_tree_t *zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
+ zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift);
+void zfs_range_tree_destroy(zfs_range_tree_t *rt);
+boolean_t zfs_range_tree_contains(zfs_range_tree_t *rt, uint64_t start,
+ uint64_t size);
+zfs_range_seg_t *zfs_range_tree_find(zfs_range_tree_t *rt, uint64_t start,
+ uint64_t size);
+boolean_t zfs_range_tree_find_in(zfs_range_tree_t *rt, uint64_t start,
+ uint64_t size, uint64_t *ostart, uint64_t *osize);
+void zfs_range_tree_verify_not_present(zfs_range_tree_t *rt,
uint64_t start, uint64_t size);
-void range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
+void zfs_range_tree_resize_segment(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
uint64_t newstart, uint64_t newsize);
-uint64_t range_tree_space(range_tree_t *rt);
-uint64_t range_tree_numsegs(range_tree_t *rt);
-boolean_t range_tree_is_empty(range_tree_t *rt);
-void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
-void range_tree_stat_verify(range_tree_t *rt);
-uint64_t range_tree_min(range_tree_t *rt);
-uint64_t range_tree_max(range_tree_t *rt);
-uint64_t range_tree_span(range_tree_t *rt);
-
-void range_tree_add(void *arg, uint64_t start, uint64_t size);
-void range_tree_remove(void *arg, uint64_t start, uint64_t size);
-void range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size);
-void range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta);
-void range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size);
-
-void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg);
-void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg);
-range_seg_t *range_tree_first(range_tree_t *rt);
-
-void range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
- range_tree_t *removefrom, range_tree_t *addto);
-void range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
- range_tree_t *addto);
+uint64_t zfs_range_tree_space(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_numsegs(zfs_range_tree_t *rt);
+boolean_t zfs_range_tree_is_empty(zfs_range_tree_t *rt);
+void zfs_range_tree_swap(zfs_range_tree_t **rtsrc, zfs_range_tree_t **rtdst);
+void zfs_range_tree_stat_verify(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_min(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_max(zfs_range_tree_t *rt);
+uint64_t zfs_range_tree_span(zfs_range_tree_t *rt);
+
+void zfs_range_tree_add(void *arg, uint64_t start, uint64_t size);
+void zfs_range_tree_remove(void *arg, uint64_t start, uint64_t size);
+void zfs_range_tree_remove_fill(zfs_range_tree_t *rt, uint64_t start,
+ uint64_t size);
+void zfs_range_tree_adjust_fill(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
+ int64_t delta);
+void zfs_range_tree_clear(zfs_range_tree_t *rt, uint64_t start, uint64_t size);
+
+void zfs_range_tree_vacate(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
+ void *arg);
+void zfs_range_tree_walk(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
+ void *arg);
+zfs_range_seg_t *zfs_range_tree_first(zfs_range_tree_t *rt);
+
+void zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
+ zfs_range_tree_t *removefrom, zfs_range_tree_t *addto);
+void zfs_range_tree_remove_xor_add(zfs_range_tree_t *rt,
+ zfs_range_tree_t *removefrom, zfs_range_tree_t *addto);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_RANGE_TREE_H */
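A companion sketch (again illustrative, not from the patch) of what the renamed zfs_rs_* accessors above actually do: the setters subtract rt_start and shift by rt_shift before storing the raw value, and the getters undo that scaling. It assumes a ZFS_RANGE_SEG32 or ZFS_RANGE_SEG64 tree whose rt_start is itself aligned to 1 << rt_shift, as is the case for metaslab trees; the function name is hypothetical.

#include <sys/zfs_context.h>
#include <sys/range_tree.h>

static void
demo_seg_accessors(zfs_range_tree_t *rt)
{
        /* Stack-allocated segment; the real layout is picked by rt->rt_type. */
        zfs_range_seg_max_t seg;

        /* Offsets must be >= rt_start and multiples of 1 << rt_shift. */
        zfs_rs_set_start(&seg, rt, rt->rt_start + (8ULL << rt->rt_shift));
        zfs_rs_set_end(&seg, rt, rt->rt_start + (16ULL << rt->rt_shift));

        /* The getters reapply rt_start/rt_shift to the stored raw values. */
        ASSERT3U(zfs_rs_get_start(&seg, rt), ==,
            rt->rt_start + (8ULL << rt->rt_shift));

        /* For SEG32/SEG64 trees the fill is simply end - start. */
        ASSERT3U(zfs_rs_get_fill(&seg, rt), ==, 8ULL << rt->rt_shift);
}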
diff --git a/sys/contrib/openzfs/include/sys/space_map.h b/sys/contrib/openzfs/include/sys/space_map.h
index 14c5beccee55..2861b25e41ee 100644
--- a/sys/contrib/openzfs/include/sys/space_map.h
+++ b/sys/contrib/openzfs/include/sys/space_map.h
@@ -1,246 +1,246 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_SPACE_MAP_H
#define _SYS_SPACE_MAP_H
#include <sys/avl.h>
#include <sys/range_tree.h>
#include <sys/dmu.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* The size of the space map object has increased to include a histogram.
* The SPACE_MAP_SIZE_V0 designates the original size and is used to
* maintain backward compatibility.
*/
#define SPACE_MAP_SIZE_V0 (3 * sizeof (uint64_t))
#define SPACE_MAP_HISTOGRAM_SIZE 32
/*
* The space_map_phys is the on-disk representation of the space map.
* Consumers of space maps should never reference any of the members of this
* structure directly. These members may only be updated in syncing context.
*
* Note the smp_object is no longer used but remains in the structure
* for backward compatibility.
*/
typedef struct space_map_phys {
/* object number: not needed but kept for backwards compatibility */
uint64_t smp_object;
/* length of the object in bytes */
uint64_t smp_length;
/* space allocated from the map */
int64_t smp_alloc;
/* reserved */
uint64_t smp_pad[5];
/*
* The smp_histogram maintains a histogram of free regions. Each
* bucket, smp_histogram[i], contains the number of free regions
* whose size is:
* 2^(i+sm_shift) <= size of free region in bytes < 2^(i+sm_shift+1)
*
* Note that, if log space map feature is enabled, histograms of
* space maps that belong to metaslabs will take into account any
* unflushed changes for their metaslabs, even though the actual
* space map doesn't have entries for these changes.
*/
uint64_t smp_histogram[SPACE_MAP_HISTOGRAM_SIZE];
} space_map_phys_t;
/*
* The space map object defines a region of space, its size, how much is
* allocated, and the on-disk object that stores this information.
* Consumers of space maps may only access the members of this structure.
*
* Note: the space_map may not be accessed concurrently; consumers
* must provide external locking if required.
*/
typedef struct space_map {
uint64_t sm_start; /* start of map */
uint64_t sm_size; /* size of map */
uint8_t sm_shift; /* unit shift */
objset_t *sm_os; /* objset for this map */
uint64_t sm_object; /* object id for this map */
uint32_t sm_blksz; /* block size for space map */
dmu_buf_t *sm_dbuf; /* space_map_phys_t dbuf */
space_map_phys_t *sm_phys; /* on-disk space map */
} space_map_t;
/*
* debug entry
*
* 2 2 10 50
* +-----+-----+------------+----------------------------------+
* | 1 0 | act | syncpass | txg (lower bits) |
* +-----+-----+------------+----------------------------------+
* 63 62 61 60 59 50 49 0
*
*
* one-word entry
*
* 1 47 1 15
* +-----------------------------------------------------------+
* | 0 | offset (sm_shift units) | type | run |
* +-----------------------------------------------------------+
* 63 62 16 15 14 0
*
*
* two-word entry
*
* 2 2 36 24
* +-----+-----+---------------------------+-------------------+
* | 1 1 | pad | run | vdev |
* +-----+-----+---------------------------+-------------------+
* 63 62 61 60 59 24 23 0
*
* 1 63
* +------+----------------------------------------------------+
* | type | offset |
* +------+----------------------------------------------------+
* 63 62 0
*
* Note that a two-word entry will not straddle a block boundary.
* If necessary, the last word of a block will be padded with a
* debug entry (with act = syncpass = txg = 0).
*/
typedef enum {
SM_ALLOC,
SM_FREE
} maptype_t;
typedef struct space_map_entry {
maptype_t sme_type;
uint32_t sme_vdev; /* max is 2^24-1; SM_NO_VDEVID if not present */
uint64_t sme_offset; /* max is 2^63-1; units of sm_shift */
uint64_t sme_run; /* max is 2^36; units of sm_shift */
/*
* The following fields are not part of the actual space map entry
* on-disk and they are populated with the values from the debug
* entry most recently visited starting from the beginning to the
* end of the space map.
*/
uint64_t sme_txg;
uint64_t sme_sync_pass;
} space_map_entry_t;
#define SM_NO_VDEVID (1 << SPA_VDEVBITS)
/* one-word entry constants */
#define SM_DEBUG_PREFIX 2
#define SM_OFFSET_BITS 47
#define SM_RUN_BITS 15
/* two-word entry constants */
#define SM2_PREFIX 3
#define SM2_OFFSET_BITS 63
#define SM2_RUN_BITS 36
#define SM_PREFIX_DECODE(x) BF64_DECODE(x, 62, 2)
#define SM_PREFIX_ENCODE(x) BF64_ENCODE(x, 62, 2)
#define SM_DEBUG_ACTION_DECODE(x) BF64_DECODE(x, 60, 2)
#define SM_DEBUG_ACTION_ENCODE(x) BF64_ENCODE(x, 60, 2)
#define SM_DEBUG_SYNCPASS_DECODE(x) BF64_DECODE(x, 50, 10)
#define SM_DEBUG_SYNCPASS_ENCODE(x) BF64_ENCODE(x, 50, 10)
#define SM_DEBUG_TXG_DECODE(x) BF64_DECODE(x, 0, 50)
#define SM_DEBUG_TXG_ENCODE(x) BF64_ENCODE(x, 0, 50)
#define SM_OFFSET_DECODE(x) BF64_DECODE(x, 16, SM_OFFSET_BITS)
#define SM_OFFSET_ENCODE(x) BF64_ENCODE(x, 16, SM_OFFSET_BITS)
#define SM_TYPE_DECODE(x) BF64_DECODE(x, 15, 1)
#define SM_TYPE_ENCODE(x) BF64_ENCODE(x, 15, 1)
#define SM_RUN_DECODE(x) (BF64_DECODE(x, 0, SM_RUN_BITS) + 1)
#define SM_RUN_ENCODE(x) BF64_ENCODE((x) - 1, 0, SM_RUN_BITS)
#define SM_RUN_MAX SM_RUN_DECODE(~0ULL)
#define SM_OFFSET_MAX SM_OFFSET_DECODE(~0ULL)
#define SM2_RUN_DECODE(x) (BF64_DECODE(x, SPA_VDEVBITS, SM2_RUN_BITS) + 1)
#define SM2_RUN_ENCODE(x) BF64_ENCODE((x) - 1, SPA_VDEVBITS, SM2_RUN_BITS)
#define SM2_VDEV_DECODE(x) BF64_DECODE(x, 0, SPA_VDEVBITS)
#define SM2_VDEV_ENCODE(x) BF64_ENCODE(x, 0, SPA_VDEVBITS)
#define SM2_TYPE_DECODE(x) BF64_DECODE(x, SM2_OFFSET_BITS, 1)
#define SM2_TYPE_ENCODE(x) BF64_ENCODE(x, SM2_OFFSET_BITS, 1)
#define SM2_OFFSET_DECODE(x) BF64_DECODE(x, 0, SM2_OFFSET_BITS)
#define SM2_OFFSET_ENCODE(x) BF64_ENCODE(x, 0, SM2_OFFSET_BITS)
#define SM2_RUN_MAX SM2_RUN_DECODE(~0ULL)
#define SM2_OFFSET_MAX SM2_OFFSET_DECODE(~0ULL)
boolean_t sm_entry_is_debug(uint64_t e);
boolean_t sm_entry_is_single_word(uint64_t e);
boolean_t sm_entry_is_double_word(uint64_t e);
typedef int (*sm_cb_t)(space_map_entry_t *sme, void *arg);
-int space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype);
-int space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
- uint64_t length);
+int space_map_load(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype);
+int space_map_load_length(space_map_t *sm, zfs_range_tree_t *rt,
+ maptype_t maptype, uint64_t length);
int space_map_iterate(space_map_t *sm, uint64_t length,
sm_cb_t callback, void *arg);
int space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
dmu_tx_t *tx);
-boolean_t space_map_histogram_verify(space_map_t *sm, range_tree_t *rt);
+boolean_t space_map_histogram_verify(space_map_t *sm, zfs_range_tree_t *rt);
void space_map_histogram_clear(space_map_t *sm);
-void space_map_histogram_add(space_map_t *sm, range_tree_t *rt,
+void space_map_histogram_add(space_map_t *sm, zfs_range_tree_t *rt,
dmu_tx_t *tx);
uint64_t space_map_object(space_map_t *sm);
int64_t space_map_allocated(space_map_t *sm);
uint64_t space_map_length(space_map_t *sm);
-uint64_t space_map_entries(space_map_t *sm, range_tree_t *rt);
+uint64_t space_map_entries(space_map_t *sm, zfs_range_tree_t *rt);
uint64_t space_map_nblocks(space_map_t *sm);
-void space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+void space_map_write(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t vdev_id, dmu_tx_t *tx);
-uint64_t space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
+uint64_t space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
uint64_t vdev_id);
void space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx);
uint64_t space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx);
void space_map_free(space_map_t *sm, dmu_tx_t *tx);
void space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx);
int space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
uint64_t start, uint64_t size, uint8_t shift);
void space_map_close(space_map_t *sm);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPACE_MAP_H */
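To make the entry layouts documented above concrete, here is a small standalone sketch (not part of the patch) that decodes a one-word space map entry by hand with plain shifts and masks; the SM_*_DECODE macros in this header express the same bit fields via BF64_DECODE. The function name and the use of printf() are illustrative only.

#include <stdint.h>
#include <stdio.h>

static void
demo_decode_one_word(uint64_t e, uint8_t sm_shift)
{
        /* One-word entries have bit 63 clear; debug/two-word entries do not. */
        if (e >> 63) {
                printf("not a one-word entry\n");
                return;
        }

        uint64_t offset = (e >> 16) & ((1ULL << 47) - 1);  /* bits 62..16 */
        uint64_t type = (e >> 15) & 1;                     /* 0 = ALLOC, 1 = FREE */
        uint64_t run = (e & 0x7fff) + 1;                   /* stored as run - 1 */

        /* Both offset and run are expressed in units of 1 << sm_shift. */
        printf("%s: %llu bytes at offset %llu\n",
            type ? "FREE" : "ALLOC",
            (unsigned long long)(run << sm_shift),
            (unsigned long long)(offset << sm_shift));
}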
diff --git a/sys/contrib/openzfs/include/sys/space_reftree.h b/sys/contrib/openzfs/include/sys/space_reftree.h
index b7a846aec624..e9a44ecf46b3 100644
--- a/sys/contrib/openzfs/include/sys/space_reftree.h
+++ b/sys/contrib/openzfs/include/sys/space_reftree.h
@@ -1,57 +1,57 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#ifndef _SYS_SPACE_REFTREE_H
#define _SYS_SPACE_REFTREE_H
#include <sys/range_tree.h>
#include <sys/avl.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct space_ref {
avl_node_t sr_node; /* AVL node */
uint64_t sr_offset; /* range offset (start or end) */
int64_t sr_refcnt; /* associated reference count */
} space_ref_t;
void space_reftree_create(avl_tree_t *t);
void space_reftree_destroy(avl_tree_t *t);
void space_reftree_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
int64_t refcnt);
-void space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt);
-void space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt,
+void space_reftree_add_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t refcnt);
+void space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt,
int64_t minref);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPACE_REFTREE_H */
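As a conceptual aid (not part of the patch), the sweep that space_reftree_generate_map() performs over space_ref_t nodes can be pictured as follows: every added segment contributes a +refcnt event at its start offset and a -refcnt event at its end, and regions whose accumulated count reaches minref are emitted. The array-based version below is only a sketch of that idea, not the AVL-based implementation.

#include <stdint.h>
#include <stdio.h>

/* One boundary event: +refcnt at a segment start, -refcnt at its end. */
struct boundary {
        uint64_t offset;
        int64_t refdelta;
};

/* 'ev' must be sorted by offset; matched +/- events return the count to 0. */
static void
demo_generate_map(const struct boundary *ev, int nev, int64_t minref)
{
        int64_t refcnt = 0;
        uint64_t start = 0;

        for (int i = 0; i < nev; i++) {
                int64_t prev = refcnt;
                refcnt += ev[i].refdelta;
                if (prev < minref && refcnt >= minref)
                        start = ev[i].offset;   /* entering a covered region */
                else if (prev >= minref && refcnt < minref)
                        printf("[%llu, %llu)\n",        /* leaving it */
                            (unsigned long long)start,
                            (unsigned long long)ev[i].offset);
        }
}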
diff --git a/sys/contrib/openzfs/include/sys/vdev.h b/sys/contrib/openzfs/include/sys/vdev.h
index 38f62b07dc59..6ab7ac40bb07 100644
--- a/sys/contrib/openzfs/include/sys/vdev.h
+++ b/sys/contrib/openzfs/include/sys/vdev.h
@@ -1,232 +1,232 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
*/
#ifndef _SYS_VDEV_H
#define _SYS_VDEV_H
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/metaslab.h>
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum vdev_dtl_type {
DTL_MISSING, /* 0% replication: no copies of the data */
DTL_PARTIAL, /* less than 100% replication: some copies missing */
DTL_SCRUB, /* unable to fully repair during scrub/resilver */
DTL_OUTAGE, /* temporarily missing (used to attempt detach) */
DTL_TYPES
} vdev_dtl_type_t;
extern int zfs_nocacheflush;
typedef boolean_t vdev_open_children_func_t(vdev_t *vd);
extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern void vdev_dbgmsg_print_tree(vdev_t *, int);
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
extern void vdev_open_children_subset(vdev_t *, vdev_open_children_func_t *);
extern int vdev_validate(vdev_t *);
extern int vdev_copy_path_strict(vdev_t *, vdev_t *);
extern void vdev_copy_path_relaxed(vdev_t *, vdev_t *);
extern void vdev_close(vdev_t *);
extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace);
extern void vdev_reopen(vdev_t *);
extern int vdev_validate_aux(vdev_t *vd);
extern zio_t *vdev_probe(vdev_t *vd, zio_t *pio);
extern boolean_t vdev_is_concrete(vdev_t *vd);
extern boolean_t vdev_is_bootable(vdev_t *vd);
extern vdev_t *vdev_lookup_top(spa_t *spa, uint64_t vdev);
extern vdev_t *vdev_lookup_by_guid(vdev_t *vd, uint64_t guid);
extern int vdev_count_leaves(spa_t *spa);
extern void vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t d);
extern boolean_t vdev_default_need_resilver(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
extern boolean_t vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done);
extern boolean_t vdev_dtl_required(vdev_t *vd);
extern boolean_t vdev_resilver_needed(vdev_t *vd,
uint64_t *minp, uint64_t *maxp);
extern void vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj,
dmu_tx_t *tx);
extern uint64_t vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset,
uint64_t size);
extern void spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev,
uint64_t offset, uint64_t size, dmu_tx_t *tx);
extern boolean_t vdev_replace_in_progress(vdev_t *vdev);
extern void vdev_hold(vdev_t *);
extern void vdev_rele(vdev_t *);
extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg);
extern void vdev_metaslab_fini(vdev_t *vd);
extern void vdev_metaslab_set_size(vdev_t *);
extern void vdev_expand(vdev_t *vd, uint64_t txg);
extern void vdev_split(vdev_t *vd);
extern void vdev_deadman(vdev_t *vd, const char *tag);
-typedef void vdev_xlate_func_t(void *arg, range_seg64_t *physical_rs);
+typedef void vdev_xlate_func_t(void *arg, zfs_range_seg64_t *physical_rs);
-extern boolean_t vdev_xlate_is_empty(range_seg64_t *rs);
-extern void vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs, range_seg64_t *remain_rs);
-extern void vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
+extern boolean_t vdev_xlate_is_empty(zfs_range_seg64_t *rs);
+extern void vdev_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
+ zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs);
+extern void vdev_xlate_walk(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg);
extern void vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx);
extern metaslab_group_t *vdev_get_mg(vdev_t *vd, metaslab_class_t *mc);
extern void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs);
extern void vdev_clear_stats(vdev_t *vd);
extern void vdev_stat_update(zio_t *zio, uint64_t psize);
extern void vdev_scan_stat_init(vdev_t *vd);
extern void vdev_propagate_state(vdev_t *vd);
extern void vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state,
vdev_aux_t aux);
extern boolean_t vdev_children_are_offline(vdev_t *vd);
extern void vdev_space_update(vdev_t *vd,
int64_t alloc_delta, int64_t defer_delta, int64_t space_delta);
extern int64_t vdev_deflated_space(vdev_t *vd, int64_t space);
extern uint64_t vdev_psize_to_asize_txg(vdev_t *vd, uint64_t psize,
uint64_t txg);
extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize);
/*
* Return the amount of space allocated for a gang block header. Note that
* since the physical birth txg is not provided, this must be constant for
* a given vdev. (e.g. raidz expansion can't change this)
*/
static inline uint64_t
vdev_gang_header_asize(vdev_t *vd)
{
return (vdev_psize_to_asize_txg(vd, SPA_GANGBLOCKSIZE, 0));
}
extern int vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,
vdev_state_t *);
extern int vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags);
extern int vdev_remove_wanted(spa_t *spa, uint64_t guid);
extern void vdev_clear(spa_t *spa, vdev_t *vd);
extern boolean_t vdev_is_dead(vdev_t *vd);
extern boolean_t vdev_readable(vdev_t *vd);
extern boolean_t vdev_writeable(vdev_t *vd);
extern boolean_t vdev_allocatable(vdev_t *vd);
extern boolean_t vdev_accessible(vdev_t *vd, zio_t *zio);
extern boolean_t vdev_is_spacemap_addressable(vdev_t *vd);
extern void vdev_queue_init(vdev_t *vd);
extern void vdev_queue_fini(vdev_t *vd);
extern zio_t *vdev_queue_io(zio_t *zio);
extern void vdev_queue_io_done(zio_t *zio);
extern void vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority);
extern uint32_t vdev_queue_length(vdev_t *vd);
extern uint64_t vdev_queue_last_offset(vdev_t *vd);
extern uint64_t vdev_queue_class_length(vdev_t *vq, zio_priority_t p);
extern void vdev_config_dirty(vdev_t *vd);
extern void vdev_config_clean(vdev_t *vd);
extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
extern void vdev_state_dirty(vdev_t *vd);
extern void vdev_state_clean(vdev_t *vd);
extern void vdev_defer_resilver(vdev_t *vd);
extern boolean_t vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx);
typedef enum vdev_config_flag {
VDEV_CONFIG_SPARE = 1 << 0,
VDEV_CONFIG_L2CACHE = 1 << 1,
VDEV_CONFIG_MOS = 1 << 2,
VDEV_CONFIG_MISSING = 1 << 3
} vdev_config_flag_t;
extern void vdev_post_kobj_evt(vdev_t *vd);
extern void vdev_clear_kobj_evt(vdev_t *vd);
extern void vdev_top_config_generate(spa_t *spa, nvlist_t *config);
extern nvlist_t *vdev_config_generate(spa_t *spa, vdev_t *vd,
boolean_t getstats, vdev_config_flag_t flags);
/*
* Label routines
*/
struct uberblock;
extern uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset);
extern int vdev_label_number(uint64_t psise, uint64_t offset);
extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
extern void vdev_uberblock_load(vdev_t *, struct uberblock *, nvlist_t **);
extern void vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv);
extern void vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t
offset, uint64_t size, zio_done_func_t *done, void *priv, int flags);
extern int vdev_label_read_bootenv(vdev_t *, nvlist_t *);
extern int vdev_label_write_bootenv(vdev_t *, nvlist_t *);
extern int vdev_uberblock_sync_list(vdev_t **, int, struct uberblock *, int);
extern int vdev_check_boot_reserve(spa_t *, vdev_t *);
typedef enum {
VDEV_LABEL_CREATE, /* create/add a new device */
VDEV_LABEL_REPLACE, /* replace an existing device */
VDEV_LABEL_SPARE, /* add a new hot spare */
VDEV_LABEL_REMOVE, /* remove an existing device */
VDEV_LABEL_L2CACHE, /* add an L2ARC cache device */
VDEV_LABEL_SPLIT /* generating new label for split-off dev */
} vdev_labeltype_t;
extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason);
extern int vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl);
extern int vdev_prop_get(vdev_t *vd, nvlist_t *nvprops, nvlist_t *outnvl);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_H */
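A last illustrative sketch (not part of the patch) of the renamed zfs_range_seg64_t in the translation interface above: a caller builds a logical segment and lets vdev_xlate_walk() invoke a callback per translated physical extent. It assumes a kernel/libzpool context with a valid leaf vdev_t; the helper names are hypothetical.

#include <sys/zfs_context.h>
#include <sys/vdev.h>

/* Callback matching vdev_xlate_func_t: accumulate translated extent sizes. */
static void
sum_physical_cb(void *arg, zfs_range_seg64_t *physical_rs)
{
        uint64_t *total = arg;

        if (!vdev_xlate_is_empty(physical_rs))
                *total += physical_rs->rs_end - physical_rs->rs_start;
}

/* Sum the physical span backing a logical range of a leaf vdev. */
static uint64_t
demo_xlate_span(vdev_t *vd, uint64_t start, uint64_t size)
{
        zfs_range_seg64_t logical;
        uint64_t total = 0;

        logical.rs_start = start;
        logical.rs_end = start + size;
        vdev_xlate_walk(vd, &logical, sum_physical_cb, &total);
        return (total);
}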
diff --git a/sys/contrib/openzfs/include/sys/vdev_impl.h b/sys/contrib/openzfs/include/sys/vdev_impl.h
index abd66b8abc96..315e2fc88410 100644
--- a/sys/contrib/openzfs/include/sys/vdev_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_impl.h
@@ -1,666 +1,671 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2023, Klara Inc.
*/
#ifndef _SYS_VDEV_IMPL_H
#define _SYS_VDEV_IMPL_H
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/metaslab.h>
#include <sys/nvpair.h>
#include <sys/space_map.h>
#include <sys/vdev.h>
#include <sys/uberblock_impl.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/zfs_ratelimit.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Virtual device descriptors.
*
* All storage pool operations go through the virtual device framework,
* which provides data replication and I/O scheduling.
*/
/*
* Forward declarations that lots of things need.
*/
typedef struct vdev_queue vdev_queue_t;
struct abd;
extern uint_t zfs_vdev_queue_depth_pct;
extern uint_t zfs_vdev_def_queue_depth;
extern uint_t zfs_vdev_async_write_max_active;
/*
* Virtual device operations
*/
typedef int vdev_init_func_t(spa_t *spa, nvlist_t *nv, void **tsd);
typedef void vdev_kobj_post_evt_func_t(vdev_t *vd);
typedef void vdev_fini_func_t(vdev_t *vd);
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
uint64_t *ashift, uint64_t *pshift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize, uint64_t txg);
typedef uint64_t vdev_min_asize_func_t(vdev_t *vd);
typedef uint64_t vdev_min_alloc_func_t(vdev_t *vd);
typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
typedef void vdev_hold_func_t(vdev_t *vd);
typedef void vdev_rele_func_t(vdev_t *vd);
typedef void vdev_remap_cb_t(uint64_t inner_offset, vdev_t *vd,
uint64_t offset, uint64_t size, void *arg);
typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
vdev_remap_cb_t callback, void *arg);
/*
* Given a target vdev, translates the logical range "in" to the physical
* range "res"
*/
-typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *logical,
- range_seg64_t *physical, range_seg64_t *remain);
+typedef void vdev_xlation_func_t(vdev_t *cvd, const zfs_range_seg64_t *logical,
+ zfs_range_seg64_t *physical, zfs_range_seg64_t *remain);
typedef uint64_t vdev_rebuild_asize_func_t(vdev_t *vd, uint64_t start,
uint64_t size, uint64_t max_segment);
typedef void vdev_metaslab_init_func_t(vdev_t *vd, uint64_t *startp,
uint64_t *sizep);
typedef void vdev_config_generate_func_t(vdev_t *vd, nvlist_t *nv);
typedef uint64_t vdev_nparity_func_t(vdev_t *vd);
typedef uint64_t vdev_ndisks_func_t(vdev_t *vd);
typedef const struct vdev_ops {
vdev_init_func_t *vdev_op_init;
vdev_fini_func_t *vdev_op_fini;
vdev_open_func_t *vdev_op_open;
vdev_close_func_t *vdev_op_close;
vdev_asize_func_t *vdev_op_asize;
vdev_min_asize_func_t *vdev_op_min_asize;
vdev_min_alloc_func_t *vdev_op_min_alloc;
vdev_io_start_func_t *vdev_op_io_start;
vdev_io_done_func_t *vdev_op_io_done;
vdev_state_change_func_t *vdev_op_state_change;
vdev_need_resilver_func_t *vdev_op_need_resilver;
vdev_hold_func_t *vdev_op_hold;
vdev_rele_func_t *vdev_op_rele;
vdev_remap_func_t *vdev_op_remap;
vdev_xlation_func_t *vdev_op_xlate;
vdev_rebuild_asize_func_t *vdev_op_rebuild_asize;
vdev_metaslab_init_func_t *vdev_op_metaslab_init;
vdev_config_generate_func_t *vdev_op_config_generate;
vdev_nparity_func_t *vdev_op_nparity;
vdev_ndisks_func_t *vdev_op_ndisks;
vdev_kobj_post_evt_func_t *vdev_op_kobj_evt_post;
char vdev_op_type[16];
boolean_t vdev_op_leaf;
} vdev_ops_t;
/*
* Virtual device properties
*/
typedef union vdev_queue_class {
struct {
ulong_t vqc_list_numnodes;
list_t vqc_list;
};
avl_tree_t vqc_tree;
} vdev_queue_class_t;
struct vdev_queue {
vdev_t *vq_vdev;
vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
avl_tree_t vq_read_offset_tree;
avl_tree_t vq_write_offset_tree;
uint64_t vq_last_offset;
zio_priority_t vq_last_prio; /* Last sent I/O priority. */
uint32_t vq_cqueued; /* Classes with queued I/Os. */
uint32_t vq_cactive[ZIO_PRIORITY_NUM_QUEUEABLE];
uint32_t vq_active; /* Number of active I/Os. */
uint32_t vq_ia_active; /* Active interactive I/Os. */
uint32_t vq_nia_credit; /* Non-interactive I/Os credit. */
list_t vq_active_list; /* List of active I/Os. */
hrtime_t vq_io_complete_ts; /* time last i/o completed */
hrtime_t vq_io_delta_ts;
zio_t vq_io_search; /* used as local for stack reduction */
kmutex_t vq_lock;
};
typedef enum vdev_alloc_bias {
VDEV_BIAS_NONE,
VDEV_BIAS_LOG, /* dedicated to ZIL data (SLOG) */
VDEV_BIAS_SPECIAL, /* dedicated to ddt, metadata, and small blks */
VDEV_BIAS_DEDUP /* dedicated to dedup metadata */
} vdev_alloc_bias_t;
/*
* On-disk indirect vdev state.
*
* An indirect vdev is described exclusively in the MOS config of a pool.
* The config for an indirect vdev includes several fields, which are
* accessed in memory by a vdev_indirect_config_t.
*/
typedef struct vdev_indirect_config {
/*
* Object (in MOS) which contains the indirect mapping. This object
* contains an array of vdev_indirect_mapping_entry_phys_t ordered by
* vimep_src. The bonus buffer for this object is a
* vdev_indirect_mapping_phys_t. This object is allocated when a vdev
* removal is initiated.
*
* Note that this object can be empty if none of the data on the vdev
* has been copied yet.
*/
uint64_t vic_mapping_object;
/*
* Object (in MOS) which contains the birth times for the mapping
* entries. This object contains an array of
* vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
* buffer for this object is a vdev_indirect_birth_phys_t. This object
* is allocated when a vdev removal is initiated.
*
* Note that this object can be empty if none of the vdev's data has yet been
* copied.
*/
uint64_t vic_births_object;
/*
* This is the vdev ID which was removed previous to this vdev, or
* UINT64_MAX if there are no previously removed vdevs.
*/
uint64_t vic_prev_indirect_vdev;
} vdev_indirect_config_t;
/*
* Virtual device descriptor
*/
struct vdev {
/*
* Common to all vdev types.
*/
uint64_t vdev_id; /* child number in vdev parent */
uint64_t vdev_guid; /* unique ID for this vdev */
uint64_t vdev_guid_sum; /* self guid + all child guids */
uint64_t vdev_orig_guid; /* orig. guid prior to remove */
uint64_t vdev_asize; /* allocatable device capacity */
uint64_t vdev_min_asize; /* min acceptable asize */
uint64_t vdev_max_asize; /* max acceptable asize */
uint64_t vdev_ashift; /* block alignment shift */
/*
* Logical block alignment shift
*
* The smallest sized/aligned I/O supported by the device.
*/
uint64_t vdev_logical_ashift;
/*
* Physical block alignment shift
*
* The device supports logical I/Os with vdev_logical_ashift
* size/alignment, but optimum performance will be achieved by
* aligning/sizing requests to vdev_physical_ashift. Smaller
* requests may be inflated or incur device level read-modify-write
* operations.
*
* May be 0 to indicate no preference (i.e. use vdev_logical_ashift).
*/
uint64_t vdev_physical_ashift;
uint64_t vdev_state; /* see VDEV_STATE_* #defines */
uint64_t vdev_prevstate; /* used when reopening a vdev */
vdev_ops_t *vdev_ops; /* vdev operations */
spa_t *vdev_spa; /* spa for this vdev */
void *vdev_tsd; /* type-specific data */
vdev_t *vdev_top; /* top-level vdev */
vdev_t *vdev_parent; /* parent vdev */
vdev_t **vdev_child; /* array of children */
uint64_t vdev_children; /* number of children */
vdev_stat_t vdev_stat; /* virtual device statistics */
vdev_stat_ex_t vdev_stat_ex; /* extended statistics */
boolean_t vdev_expanding; /* expand the vdev? */
boolean_t vdev_reopening; /* reopen in progress? */
boolean_t vdev_nonrot; /* true if solid state */
int vdev_load_error; /* error on last load */
int vdev_open_error; /* error on last open */
int vdev_validate_error; /* error on last validate */
kthread_t *vdev_open_thread; /* thread opening children */
kthread_t *vdev_validate_thread; /* thread validating children */
uint64_t vdev_crtxg; /* txg when top-level was added */
uint64_t vdev_root_zap;
/*
* Top-level vdev state.
*/
uint64_t vdev_ms_array; /* metaslab array object */
uint64_t vdev_ms_shift; /* metaslab size shift */
uint64_t vdev_ms_count; /* number of metaslabs */
metaslab_group_t *vdev_mg; /* metaslab group */
metaslab_group_t *vdev_log_mg; /* embedded slog metaslab group */
metaslab_t **vdev_ms; /* metaslab array */
txg_list_t vdev_ms_list; /* per-txg dirty metaslab lists */
txg_list_t vdev_dtl_list; /* per-txg dirty DTL lists */
txg_node_t vdev_txg_node; /* per-txg dirty vdev linkage */
boolean_t vdev_remove_wanted; /* async remove wanted? */
boolean_t vdev_fault_wanted; /* async faulted wanted? */
list_node_t vdev_config_dirty_node; /* config dirty list */
list_node_t vdev_state_dirty_node; /* state dirty list */
uint64_t vdev_deflate_ratio; /* deflation ratio (x512) */
uint64_t vdev_islog; /* is an intent log device */
uint64_t vdev_noalloc; /* device is passivated? */
uint64_t vdev_removing; /* device is being removed? */
uint64_t vdev_failfast; /* device failfast setting */
boolean_t vdev_rz_expanding; /* raidz is being expanded? */
boolean_t vdev_ishole; /* is a hole in the namespace */
uint64_t vdev_top_zap;
vdev_alloc_bias_t vdev_alloc_bias; /* metaslab allocation bias */
/* pool checkpoint related */
space_map_t *vdev_checkpoint_sm; /* contains reserved blocks */
/* Initialize related */
boolean_t vdev_initialize_exit_wanted;
vdev_initializing_state_t vdev_initialize_state;
list_node_t vdev_initialize_node;
kthread_t *vdev_initialize_thread;
/* Protects vdev_initialize_thread and vdev_initialize_state. */
kmutex_t vdev_initialize_lock;
kcondvar_t vdev_initialize_cv;
uint64_t vdev_initialize_offset[TXG_SIZE];
uint64_t vdev_initialize_last_offset;
- range_tree_t *vdev_initialize_tree; /* valid while initializing */
+ /* valid while initializing */
+ zfs_range_tree_t *vdev_initialize_tree;
uint64_t vdev_initialize_bytes_est;
uint64_t vdev_initialize_bytes_done;
uint64_t vdev_initialize_action_time; /* start and end time */
/* TRIM related */
boolean_t vdev_trim_exit_wanted;
boolean_t vdev_autotrim_exit_wanted;
vdev_trim_state_t vdev_trim_state;
list_node_t vdev_trim_node;
kmutex_t vdev_autotrim_lock;
kcondvar_t vdev_autotrim_cv;
kcondvar_t vdev_autotrim_kick_cv;
kthread_t *vdev_autotrim_thread;
/* Protects vdev_trim_thread and vdev_trim_state. */
kmutex_t vdev_trim_lock;
kcondvar_t vdev_trim_cv;
kthread_t *vdev_trim_thread;
uint64_t vdev_trim_offset[TXG_SIZE];
uint64_t vdev_trim_last_offset;
uint64_t vdev_trim_bytes_est;
uint64_t vdev_trim_bytes_done;
uint64_t vdev_trim_rate; /* requested rate (bytes/sec) */
uint64_t vdev_trim_partial; /* requested partial TRIM */
uint64_t vdev_trim_secure; /* requested secure TRIM */
uint64_t vdev_trim_action_time; /* start and end time */
/* Rebuild related */
boolean_t vdev_rebuilding;
boolean_t vdev_rebuild_exit_wanted;
boolean_t vdev_rebuild_cancel_wanted;
boolean_t vdev_rebuild_reset_wanted;
kmutex_t vdev_rebuild_lock;
kcondvar_t vdev_rebuild_cv;
kthread_t *vdev_rebuild_thread;
vdev_rebuild_t vdev_rebuild_config;
/* For limiting outstanding I/Os (initialize, TRIM) */
kmutex_t vdev_initialize_io_lock;
kcondvar_t vdev_initialize_io_cv;
uint64_t vdev_initialize_inflight;
kmutex_t vdev_trim_io_lock;
kcondvar_t vdev_trim_io_cv;
uint64_t vdev_trim_inflight[3];
/*
* Values stored in the config for an indirect or removing vdev.
*/
vdev_indirect_config_t vdev_indirect_config;
/*
* The vdev_indirect_rwlock protects the vdev_indirect_mapping
* pointer from changing on indirect vdevs (when it is condensed).
* Note that removing (not yet indirect) vdevs have different
* access patterns (the mapping is not accessed from open context,
* e.g. from zio_read) and locking strategy (e.g. svr_lock).
*/
krwlock_t vdev_indirect_rwlock;
vdev_indirect_mapping_t *vdev_indirect_mapping;
vdev_indirect_births_t *vdev_indirect_births;
/*
* In-memory data structures used to manage the obsolete space map, for
* indirect or removing vdevs.
*
* The vdev_obsolete_segments is the in-core record of the segments
* that are no longer referenced anywhere in the pool (due to
* being freed or remapped and not referenced by any snapshots).
* During a sync, segments are added to vdev_obsolete_segments
* via vdev_indirect_mark_obsolete(); at the end of each sync
* pass, this is appended to vdev_obsolete_sm via
* vdev_indirect_sync_obsolete(). The vdev_obsolete_lock
* protects against concurrent modifications of vdev_obsolete_segments
* from multiple zio threads.
*/
kmutex_t vdev_obsolete_lock;
- range_tree_t *vdev_obsolete_segments;
+ zfs_range_tree_t *vdev_obsolete_segments;
space_map_t *vdev_obsolete_sm;
/*
* Protects the vdev_scan_io_queue field itself as well as the
* structure's contents (when present).
*/
kmutex_t vdev_scan_io_queue_lock;
struct dsl_scan_io_queue *vdev_scan_io_queue;
/*
* Leaf vdev state.
*/
- range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
+ zfs_range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
space_map_t *vdev_dtl_sm; /* dirty time log space map */
txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */
uint64_t vdev_dtl_object; /* DTL object */
uint64_t vdev_psize; /* physical device capacity */
uint64_t vdev_wholedisk; /* true if this is a whole disk */
uint64_t vdev_offline; /* persistent offline state */
uint64_t vdev_faulted; /* persistent faulted state */
uint64_t vdev_degraded; /* persistent degraded state */
uint64_t vdev_removed; /* persistent removed state */
uint64_t vdev_resilver_txg; /* persistent resilvering state */
uint64_t vdev_rebuild_txg; /* persistent rebuilding state */
char *vdev_path; /* vdev path (if any) */
char *vdev_devid; /* vdev devid (if any) */
char *vdev_physpath; /* vdev device path (if any) */
char *vdev_enc_sysfs_path; /* enclosure sysfs path */
char *vdev_fru; /* physical FRU location */
uint64_t vdev_not_present; /* not present during import */
uint64_t vdev_unspare; /* unspare when resilvering done */
boolean_t vdev_nowritecache; /* true if flushwritecache failed */
boolean_t vdev_has_trim; /* TRIM is supported */
boolean_t vdev_has_securetrim; /* secure TRIM is supported */
boolean_t vdev_checkremove; /* temporary online test */
boolean_t vdev_forcefault; /* force online fault */
boolean_t vdev_splitting; /* split or repair in progress */
boolean_t vdev_delayed_close; /* delayed device close? */
boolean_t vdev_tmpoffline; /* device taken offline temporarily? */
boolean_t vdev_detached; /* device detached? */
boolean_t vdev_cant_read; /* vdev is failing all reads */
boolean_t vdev_cant_write; /* vdev is failing all writes */
boolean_t vdev_isspare; /* was a hot spare */
boolean_t vdev_isl2cache; /* was a l2cache device */
boolean_t vdev_copy_uberblocks; /* post expand copy uberblocks */
boolean_t vdev_resilver_deferred; /* resilver deferred */
boolean_t vdev_kobj_flag; /* kobj event record */
boolean_t vdev_attaching; /* vdev attach ashift handling */
vdev_queue_t vdev_queue; /* I/O deadline schedule queue */
spa_aux_vdev_t *vdev_aux; /* for l2cache and spares vdevs */
zio_t *vdev_probe_zio; /* root of current probe */
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
hrtime_t vdev_mmp_pending; /* 0 if write finished */
uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
uint64_t vdev_expansion_time; /* vdev's last expansion time */
list_node_t vdev_leaf_node; /* leaf vdev list */
/*
* For DTrace to work in userland (libzpool) context, these fields must
* remain at the end of the structure. DTrace will use the kernel's
* CTF definition for 'struct vdev', and since the size of a kmutex_t is
* larger in userland, the offsets for the rest of the fields would be
* incorrect.
*/
kmutex_t vdev_dtl_lock; /* vdev_dtl_{map,resilver} */
kmutex_t vdev_stat_lock; /* vdev_stat */
kmutex_t vdev_probe_lock; /* protects vdev_probe_zio */
/*
* We rate limit ZIO delay, deadman, and checksum events, since they
* can flood ZED with tons of events when a drive is acting up.
*
* We also rate limit Direct I/O write verify errors, since a user might
* be continually manipulating a buffer that can flood ZED with tons of
* events.
*/
zfs_ratelimit_t vdev_delay_rl;
zfs_ratelimit_t vdev_deadman_rl;
zfs_ratelimit_t vdev_dio_verify_rl;
zfs_ratelimit_t vdev_checksum_rl;
/*
* Vdev properties for tuning ZED or zfsd
*/
uint64_t vdev_checksum_n;
uint64_t vdev_checksum_t;
uint64_t vdev_io_n;
uint64_t vdev_io_t;
uint64_t vdev_slow_io_n;
uint64_t vdev_slow_io_t;
};
#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_be) to skip */
#define VDEV_SKIP_SIZE VDEV_PAD_SIZE * 2
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)
/*
* MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
* ring when MMP is enabled.
*/
#define MMP_BLOCKS_PER_LABEL 1
/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd) \
MIN(MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT), \
MAX_UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd) \
(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n) \
offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
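/*
 * A minimal, standalone sketch of how the uberblock-ring macros above
 * resolve for common ashift values.  It only mirrors the arithmetic
 * (UBERBLOCK_SHIFT is the 1 KiB minimum from uberblock_impl.h); the
 * DEMO_* names and the helper below are hypothetical and do not use a
 * real vdev_t.
 */
#include <stdio.h>
#include <stdint.h>

#define	DEMO_UBERBLOCK_SHIFT		10		/* 1 KiB minimum */
#define	DEMO_MAX_UBERBLOCK_SHIFT	13		/* 8 KiB maximum */
#define	DEMO_UBERBLOCK_RING		(128 << 10)	/* 128 KiB per label */

static void
demo_uberblock_geometry(uint64_t ashift)
{
	uint64_t shift = ashift > DEMO_UBERBLOCK_SHIFT ?
	    ashift : DEMO_UBERBLOCK_SHIFT;
	if (shift > DEMO_MAX_UBERBLOCK_SHIFT)
		shift = DEMO_MAX_UBERBLOCK_SHIFT;
	printf("ashift=%llu: uberblock size %llu bytes, %llu slots per label\n",
	    (unsigned long long)ashift,
	    (unsigned long long)(1ULL << shift),
	    (unsigned long long)(DEMO_UBERBLOCK_RING >> shift));
}

int
main(void)
{
	demo_uberblock_geometry(9);	/* 512B sectors: 1 KiB ubs, 128 slots */
	demo_uberblock_geometry(12);	/* 4 KiB sectors: 4 KiB ubs, 32 slots */
	demo_uberblock_geometry(14);	/* clamped to 8 KiB ubs, 16 slots */
	return (0);
}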
typedef struct vdev_phys {
char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
zio_eck_t vp_zbt;
} vdev_phys_t;
typedef enum vbe_vers {
/*
* The bootenv file is stored as ASCII text in the envblock.
* It is used by the GRUB bootloader on Linux to store the
* contents of the grubenv file. The file is stored as raw ASCII,
* and is protected by an embedded checksum. By default, GRUB will
* check if the boot filesystem supports storing the environment data
* in a special location, and if so, will invoke filesystem specific
* logic to retrieve it. This can be overridden by a variable, should
* the user so desire.
*/
VB_RAW = 0,
/*
* The bootenv file is converted to an nvlist and then packed into the
* envblock.
*/
VB_NVLIST = 1
} vbe_vers_t;
typedef struct vdev_boot_envblock {
uint64_t vbe_version;
char vbe_bootenv[VDEV_PAD_SIZE - sizeof (uint64_t) -
sizeof (zio_eck_t)];
zio_eck_t vbe_zbt;
} vdev_boot_envblock_t;
_Static_assert(sizeof (vdev_boot_envblock_t) == VDEV_PAD_SIZE,
"vdev_boot_envblock_t wrong size");
typedef struct vdev_label {
char vl_pad1[VDEV_PAD_SIZE]; /* 8K */
vdev_boot_envblock_t vl_be; /* 8K */
vdev_phys_t vl_vdev_phys; /* 112K */
char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
} vdev_label_t; /* 256K total */
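/*
 * A quick compile-time cross-check, as a standalone sketch using local
 * copies of the constants above, that the four label regions really add
 * up to the 256 KiB on-disk label: 8K pad + 8K bootenv + 112K nvlist +
 * 128K uberblock ring.  The DEMO_* names are not part of this change.
 */
#define	DEMO_VDEV_PAD_SIZE		(8 << 10)
#define	DEMO_VDEV_PHYS_SIZE		(112 << 10)
#define	DEMO_VDEV_UBERBLOCK_RING	(128 << 10)

_Static_assert(2 * DEMO_VDEV_PAD_SIZE + DEMO_VDEV_PHYS_SIZE +
    DEMO_VDEV_UBERBLOCK_RING == (256 << 10),
    "vdev label regions must total 256 KiB");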
/*
* vdev_dirty() flags
*/
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02
/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
/*
* Size of embedded boot loader region on each label.
* The total size of the first two labels plus the boot area is 4MB.
* On RAIDZ, this space is overwritten during RAIDZ expansion.
*/
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
/*
* Size of label regions at the start and end of each leaf device.
*/
#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
#define VDEV_LABELS 4
#define VDEV_BEST_LABEL VDEV_LABELS
#define VDEV_OFFSET_IS_LABEL(vd, off) \
(((off) < VDEV_LABEL_START_SIZE) || \
((off) >= ((vd)->vdev_psize - VDEV_LABEL_END_SIZE)))
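/*
 * Standalone arithmetic sketch of the reserved regions implied above:
 * the first two labels plus the embedded boot area occupy 4 MiB at the
 * front of each leaf, and the last two labels occupy 512 KiB at the end.
 * The DEMO_* names and the 10 GiB device size are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define	DEMO_LABEL_SIZE		(256ULL << 10)	/* sizeof (vdev_label_t) */
#define	DEMO_BOOT_SIZE		(7ULL << 19)	/* 3.5 MiB boot region */
#define	DEMO_LABEL_START_SIZE	(2 * DEMO_LABEL_SIZE + DEMO_BOOT_SIZE)
#define	DEMO_LABEL_END_SIZE	(2 * DEMO_LABEL_SIZE)

int
main(void)
{
	uint64_t psize = 10ULL << 30;	/* pretend 10 GiB leaf vdev */

	printf("front reserved: %llu bytes (4 MiB)\n",
	    (unsigned long long)DEMO_LABEL_START_SIZE);
	printf("tail reserved:  %llu bytes (512 KiB)\n",
	    (unsigned long long)DEMO_LABEL_END_SIZE);
	printf("allocatable:    %llu bytes\n",
	    (unsigned long long)(psize - DEMO_LABEL_START_SIZE -
	    DEMO_LABEL_END_SIZE));
	return (0);
}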
#define VDEV_ALLOC_LOAD 0
#define VDEV_ALLOC_ADD 1
#define VDEV_ALLOC_SPARE 2
#define VDEV_ALLOC_L2CACHE 3
#define VDEV_ALLOC_ROOTPOOL 4
#define VDEV_ALLOC_SPLIT 5
#define VDEV_ALLOC_ATTACH 6
/*
* Allocate or free a vdev
*/
extern vdev_t *vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid,
vdev_ops_t *ops);
extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config,
vdev_t *parent, uint_t id, int alloctype);
extern void vdev_free(vdev_t *vd);
/*
* Add or remove children and parents
*/
extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_compact_children(vdev_t *pvd);
extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops);
extern void vdev_remove_parent(vdev_t *cvd);
/*
* vdev sync load and sync
*/
extern boolean_t vdev_log_state_valid(vdev_t *vd);
extern int vdev_load(vdev_t *vd);
extern int vdev_dtl_load(vdev_t *vd);
extern void vdev_sync(vdev_t *vd, uint64_t txg);
extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);
/*
* Available vdev types.
*/
extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
extern vdev_ops_t vdev_draid_ops;
extern vdev_ops_t vdev_draid_spare_ops;
extern vdev_ops_t vdev_disk_ops;
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
extern vdev_ops_t vdev_hole_ops;
extern vdev_ops_t vdev_spare_ops;
extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
-extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs, range_seg64_t *remain_rs);
+extern void vdev_default_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
+ zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs);
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg);
extern uint64_t vdev_default_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_alloc(vdev_t *vd);
extern uint64_t vdev_get_nparity(vdev_t *vd);
extern uint64_t vdev_get_ndisks(vdev_t *vd);
/*
* Global variables
*/
extern int zfs_vdev_standard_sm_blksz;
/*
* Functions from vdev_indirect.c
*/
extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
extern int vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj);
extern int vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise);
/*
* Other miscellaneous functions
*/
int vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj);
void vdev_metaslab_group_create(vdev_t *vd);
uint64_t vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b);
+#if defined(__linux__)
+int param_get_raidz_impl(char *buf, zfs_kernel_param_t *kp);
+#endif
+int param_set_raidz_impl(ZFS_MODULE_PARAM_ARGS);
/*
* Vdev ashift optimization tunables
*/
extern uint_t zfs_vdev_min_auto_ashift;
extern uint_t zfs_vdev_max_auto_ashift;
int param_set_min_auto_ashift(ZFS_MODULE_PARAM_ARGS);
int param_set_max_auto_ashift(ZFS_MODULE_PARAM_ARGS);
/*
* VDEV checksum verification for Direct I/O writes
*/
extern uint_t zfs_vdev_direct_write_verify;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz.h b/sys/contrib/openzfs/include/sys/vdev_raidz.h
index 64f484e9aa13..ed042aedbdbc 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz.h
@@ -1,176 +1,179 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
*/
#ifndef _SYS_VDEV_RAIDZ_H
#define _SYS_VDEV_RAIDZ_H
#include <sys/types.h>
#include <sys/zfs_rlock.h>
#ifdef __cplusplus
extern "C" {
#endif
struct zio;
struct raidz_col;
struct raidz_row;
struct raidz_map;
struct vdev_raidz;
struct uberblock;
#if !defined(_KERNEL)
struct kernel_param {};
#endif
/*
* vdev_raidz interface
*/
struct raidz_map *vdev_raidz_map_alloc(struct zio *, uint64_t, uint64_t,
uint64_t);
struct raidz_map *vdev_raidz_map_alloc_expanded(struct zio *,
uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, boolean_t);
void vdev_raidz_map_free(struct raidz_map *);
void vdev_raidz_free(struct vdev_raidz *);
void vdev_raidz_generate_parity_row(struct raidz_map *, struct raidz_row *);
void vdev_raidz_generate_parity(struct raidz_map *);
void vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
void vdev_raidz_child_done(zio_t *);
void vdev_raidz_io_done(zio_t *);
void vdev_raidz_checksum_error(zio_t *, struct raidz_col *, abd_t *);
struct raidz_row *vdev_raidz_row_alloc(int, zio_t *);
void vdev_raidz_reflow_copy_scratch(spa_t *);
void raidz_dtl_reassessed(vdev_t *);
extern const zio_vsd_ops_t vdev_raidz_vsd_ops;
/*
* vdev_raidz_math interface
*/
+/* Required, but not used, by ZFS_MODULE_PARAM_CALL */
+extern uint32_t zfs_vdev_raidz_impl;
void vdev_raidz_math_init(void);
void vdev_raidz_math_fini(void);
const struct raidz_impl_ops *vdev_raidz_math_get_ops(void);
int vdev_raidz_math_generate(struct raidz_map *, struct raidz_row *);
int vdev_raidz_math_reconstruct(struct raidz_map *, struct raidz_row *,
const int *, const int *, const int);
int vdev_raidz_impl_set(const char *);
+int vdev_raidz_impl_get(char *buffer, size_t size);
typedef struct vdev_raidz_expand {
uint64_t vre_vdev_id;
kmutex_t vre_lock;
kcondvar_t vre_cv;
/*
* How much i/o is outstanding (issued and not completed).
*/
uint64_t vre_outstanding_bytes;
/*
* Next offset to issue i/o for.
*/
uint64_t vre_offset;
/*
* Lowest offset of a failed expansion i/o. The expansion will retry
* from here. Once the expansion thread notices the failure and exits,
* vre_failed_offset is reset back to UINT64_MAX, and
* vre_waiting_for_resilver will be set.
*/
uint64_t vre_failed_offset;
boolean_t vre_waiting_for_resilver;
/*
* Offset that is completing each txg
*/
uint64_t vre_offset_pertxg[TXG_SIZE];
/*
* Bytes copied in each txg.
*/
uint64_t vre_bytes_copied_pertxg[TXG_SIZE];
/*
* The rangelock prevents normal read/write zio's from happening while
* there are expansion (reflow) i/os in progress to the same offsets.
*/
zfs_rangelock_t vre_rangelock;
/*
* These fields are stored on-disk in the vdev_top_zap:
*/
dsl_scan_state_t vre_state;
uint64_t vre_start_time;
uint64_t vre_end_time;
uint64_t vre_bytes_copied;
} vdev_raidz_expand_t;
typedef struct vdev_raidz {
/*
* Number of child vdevs when this raidz vdev was created (i.e. before
* any raidz expansions).
*/
int vd_original_width;
/*
* The current number of child vdevs, which may be more than the
* original width if an expansion is in progress or has completed.
*/
int vd_physical_width;
int vd_nparity;
/*
* Tree of reflow_node_t's. The lock protects the avl tree only.
* The reflow_node_t's describe completed expansions, and are used
* to determine the logical width given a block's birth time.
*/
avl_tree_t vd_expand_txgs;
kmutex_t vd_expand_lock;
/*
* If this vdev is being expanded, spa_raidz_expand is set to point to this.
*/
vdev_raidz_expand_t vn_vre;
} vdev_raidz_t;
extern int vdev_raidz_attach_check(vdev_t *);
extern void vdev_raidz_attach_sync(void *, dmu_tx_t *);
extern void spa_start_raidz_expansion_thread(spa_t *);
extern int spa_raidz_expand_get_stats(spa_t *, pool_raidz_expand_stat_t *);
extern int vdev_raidz_load(vdev_t *);
/* RAIDZ scratch area pause points (for testing) */
#define RAIDZ_EXPAND_PAUSE_NONE 0
#define RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_1 1
#define RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_2 2
#define RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_3 3
#define RAIDZ_EXPAND_PAUSE_SCRATCH_VALID 4
#define RAIDZ_EXPAND_PAUSE_SCRATCH_REFLOWED 5
#define RAIDZ_EXPAND_PAUSE_SCRATCH_POST_REFLOW_1 6
#define RAIDZ_EXPAND_PAUSE_SCRATCH_POST_REFLOW_2 7
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_RAIDZ_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_rebuild.h b/sys/contrib/openzfs/include/sys/vdev_rebuild.h
index 55ec6c570316..b7664a822bb3 100644
--- a/sys/contrib/openzfs/include/sys/vdev_rebuild.h
+++ b/sys/contrib/openzfs/include/sys/vdev_rebuild.h
@@ -1,102 +1,103 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018, Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
*/
#ifndef _SYS_VDEV_REBUILD_H
#define _SYS_VDEV_REBUILD_H
#include <sys/spa.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Number of entries in the physical vdev_rebuild_phys structure. This
* state is stored per top-level vdev as VDEV_ZAP_TOP_VDEV_REBUILD_PHYS.
*/
#define REBUILD_PHYS_ENTRIES 12
/*
* On-disk rebuild configuration and state. When adding new fields they
* must be added to the end of the structure.
*/
typedef struct vdev_rebuild_phys {
uint64_t vrp_rebuild_state; /* vdev_rebuild_state_t */
uint64_t vrp_last_offset; /* last rebuilt offset */
uint64_t vrp_min_txg; /* minimum missing txg */
uint64_t vrp_max_txg; /* maximum missing txg */
uint64_t vrp_start_time; /* start time */
uint64_t vrp_end_time; /* end time */
uint64_t vrp_scan_time_ms; /* total run time in ms */
uint64_t vrp_bytes_scanned; /* alloc bytes scanned */
uint64_t vrp_bytes_issued; /* read bytes rebuilt */
uint64_t vrp_bytes_rebuilt; /* rebuilt bytes */
uint64_t vrp_bytes_est; /* total bytes to scan */
uint64_t vrp_errors; /* errors during rebuild */
} vdev_rebuild_phys_t;
/*
* The vdev_rebuild_t describes the current state and how a top-level vdev
* should be rebuilt. The core elements are the top-level vdev, the metaslab
* being rebuilt, the range tree containing the allocated extents, and the
* on-disk state.
*/
typedef struct vdev_rebuild {
vdev_t *vr_top_vdev; /* top-level vdev to rebuild */
metaslab_t *vr_scan_msp; /* scanning disabled metaslab */
- range_tree_t *vr_scan_tree; /* scan ranges (in metaslab) */
+ /* scan ranges (in metaslab) */
+ zfs_range_tree_t *vr_scan_tree;
kmutex_t vr_io_lock; /* inflight IO lock */
kcondvar_t vr_io_cv; /* inflight IO cv */
/* In-core state and progress */
uint64_t vr_scan_offset[TXG_SIZE];
uint64_t vr_prev_scan_time_ms; /* any previous scan time */
uint64_t vr_bytes_inflight_max; /* maximum bytes inflight */
uint64_t vr_bytes_inflight; /* current bytes inflight */
/* Per-rebuild pass statistics for calculating bandwidth */
uint64_t vr_pass_start_time;
uint64_t vr_pass_bytes_scanned;
uint64_t vr_pass_bytes_issued;
uint64_t vr_pass_bytes_skipped;
/* On-disk state updated by vdev_rebuild_zap_update_sync() */
vdev_rebuild_phys_t vr_rebuild_phys;
} vdev_rebuild_t;
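/*
 * A hypothetical helper (not part of the rebuild code) showing how the
 * in-core progress and per-pass statistics above can be turned into a
 * percent-complete and a scan bandwidth figure of the kind reported by
 * 'zpool status'.  All names and sample values are made up.
 */
#include <stdio.h>
#include <stdint.h>

static void
demo_rebuild_progress(uint64_t bytes_scanned, uint64_t bytes_est,
    uint64_t pass_bytes_issued, uint64_t pass_elapsed_ms)
{
	double pct = bytes_est == 0 ? 0.0 :
	    100.0 * (double)bytes_scanned / (double)bytes_est;
	double mbps = pass_elapsed_ms == 0 ? 0.0 :
	    ((double)pass_bytes_issued / (1 << 20)) /
	    ((double)pass_elapsed_ms / 1000.0);

	printf("rebuilt %.1f%% of allocated space at %.1f MiB/s\n", pct, mbps);
}

int
main(void)
{
	/* e.g. 300 GiB of an estimated 1 TiB scanned, 60 GiB issued in 10 min */
	demo_rebuild_progress(300ULL << 30, 1ULL << 40, 60ULL << 30,
	    10 * 60 * 1000);
	return (0);
}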
boolean_t vdev_rebuild_active(vdev_t *);
int vdev_rebuild_load(vdev_t *);
void vdev_rebuild(vdev_t *);
void vdev_rebuild_stop_wait(vdev_t *);
void vdev_rebuild_stop_all(spa_t *);
void vdev_rebuild_restart(spa_t *);
void vdev_rebuild_clear_sync(void *, dmu_tx_t *);
int vdev_rebuild_get_stats(vdev_t *, vdev_rebuild_stat_t *);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_REBUILD_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_removal.h b/sys/contrib/openzfs/include/sys/vdev_removal.h
index 70b743f4ec6b..8e6005a94260 100644
--- a/sys/contrib/openzfs/include/sys/vdev_removal.h
+++ b/sys/contrib/openzfs/include/sys/vdev_removal.h
@@ -1,96 +1,96 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2019 by Delphix. All rights reserved.
*/
#ifndef _SYS_VDEV_REMOVAL_H
#define _SYS_VDEV_REMOVAL_H
#include <sys/spa.h>
#include <sys/bpobj.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct spa_vdev_removal {
uint64_t svr_vdev_id;
uint64_t svr_max_offset_to_sync[TXG_SIZE];
/* Thread performing a vdev removal. */
kthread_t *svr_thread;
/* Segments left to copy from the current metaslab. */
- range_tree_t *svr_allocd_segs;
+ zfs_range_tree_t *svr_allocd_segs;
kmutex_t svr_lock;
kcondvar_t svr_cv;
boolean_t svr_thread_exit;
/*
* New mappings to write out each txg.
*/
list_t svr_new_segments[TXG_SIZE];
/*
* Ranges that were freed while a mapping was in flight. This is
* a subset of the ranges covered by vdev_im_new_segments.
*/
- range_tree_t *svr_frees[TXG_SIZE];
+ zfs_range_tree_t *svr_frees[TXG_SIZE];
/*
* Number of bytes for which we have finished our work
* in each txg. This could be data copied (which will be part of
* the mappings in vdev_im_new_segments), or data freed before
* we got around to copying it.
*/
uint64_t svr_bytes_done[TXG_SIZE];
/* List of leaf zap objects to be unlinked */
nvlist_t *svr_zaplist;
} spa_vdev_removal_t;
typedef struct spa_condensing_indirect {
/*
* New mappings to write out each txg.
*/
list_t sci_new_mapping_entries[TXG_SIZE];
vdev_indirect_mapping_t *sci_new_mapping;
} spa_condensing_indirect_t;
extern int spa_remove_init(spa_t *);
extern void spa_restart_removal(spa_t *);
extern int spa_condense_init(spa_t *);
extern void spa_condense_fini(spa_t *);
extern void spa_start_indirect_condensing_thread(spa_t *);
extern void spa_vdev_condense_suspend(spa_t *);
extern int spa_vdev_remove(spa_t *, uint64_t, boolean_t);
extern void free_from_removing_vdev(vdev_t *, uint64_t, uint64_t);
extern int spa_removal_get_stats(spa_t *, pool_removal_stat_t *);
extern void svr_sync(spa_t *, dmu_tx_t *);
extern void spa_vdev_remove_suspend(spa_t *);
extern int spa_vdev_remove_cancel(spa_t *);
extern void spa_vdev_removal_destroy(spa_vdev_removal_t *);
extern uint64_t spa_remove_max_segment(spa_t *);
extern uint_t vdev_removal_max_span;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_REMOVAL_H */
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c b/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
index 231bbbd92dbf..06fa52b00e05 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
@@ -1,5674 +1,5675 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017-2018 RackTop Systems.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021 Matt Fiddaman
*/
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <pwd.h>
#include <grp.h>
#ifdef HAVE_IDMAP
#include <idmap.h>
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_deleg.h"
static __thread struct passwd gpwd;
static __thread struct group ggrp;
static __thread char rpbuf[2048];
static int userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
*/
const char *
zfs_type_to_name(zfs_type_t type)
{
switch (type) {
case ZFS_TYPE_FILESYSTEM:
return (dgettext(TEXT_DOMAIN, "filesystem"));
case ZFS_TYPE_SNAPSHOT:
return (dgettext(TEXT_DOMAIN, "snapshot"));
case ZFS_TYPE_VOLUME:
return (dgettext(TEXT_DOMAIN, "volume"));
case ZFS_TYPE_POOL:
return (dgettext(TEXT_DOMAIN, "pool"));
case ZFS_TYPE_BOOKMARK:
return (dgettext(TEXT_DOMAIN, "bookmark"));
default:
assert(!"unhandled zfs_type_t");
}
return (NULL);
}
/*
* Validate a ZFS path. This is used even before trying to open the dataset, to
* provide a more meaningful error message. We call zfs_error_aux() to
* explain exactly why the name was not valid.
*/
int
zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
boolean_t modifying)
{
namecheck_err_t why;
char what;
if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshot delimiter '@' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '@' delimiter in snapshot name"));
return (0);
}
if (!(type & ZFS_TYPE_BOOKMARK) && strchr(path, '#') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bookmark delimiter '#' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_BOOKMARK && strchr(path, '#') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '#' delimiter in bookmark name"));
return (0);
}
if (modifying && strchr(path, '%') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid character %c in name"), '%');
return (0);
}
if (entity_namecheck(path, &why, &what) != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is too long"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component or misplaced '@'"
" or '#' delimiter in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in name"), what);
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool doesn't begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"reserved disk name"));
break;
case NAME_ERR_SELF_REF:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"self reference, '.' is found in name"));
break;
case NAME_ERR_PARENT_REF:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent reference, '..' is found in name"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (0);
}
return (-1);
}
int
zfs_name_valid(const char *name, zfs_type_t type)
{
if (type == ZFS_TYPE_POOL)
return (zpool_name_valid(NULL, B_FALSE, name));
return (zfs_validate_name(NULL, name, type, B_FALSE));
}
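/*
 * A minimal usage sketch of the public wrapper above: validate candidate
 * dataset and snapshot names without opening anything.  Linking against
 * libzfs is assumed, and the names shown are made up.
 */
#include <stdio.h>
#include <libzfs.h>

int
main(void)
{
	const char *names[] = {
		"tank/home",		/* valid filesystem name */
		"tank/home@backup",	/* valid snapshot name */
		"tank//home",		/* invalid: empty component */
	};

	for (int i = 0; i < 3; i++) {
		printf("%-20s %s\n", names[i],
		    zfs_name_valid(names[i], ZFS_TYPE_DATASET) ?
		    "ok" : "invalid");
	}
	return (0);
}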
/*
* This function takes the raw DSL properties, and filters out the user-defined
* properties into a separate nvlist.
*/
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvpair_t *elem;
nvlist_t *nvl;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
if (!zfs_prop_user(nvpair_name(elem)))
continue;
nvlist_t *propval = fnvpair_value_nvlist(elem);
if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
nvlist_free(nvl);
(void) no_memory(hdl);
return (NULL);
}
}
return (nvl);
}
static zpool_handle_t *
zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph;
if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
if (hdl->libzfs_pool_handles != NULL)
zph->zpool_next = hdl->libzfs_pool_handles;
hdl->libzfs_pool_handles = zph;
}
return (zph);
}
static zpool_handle_t *
zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph = hdl->libzfs_pool_handles;
while ((zph != NULL) &&
(strncmp(pool_name, zpool_get_name(zph), len) != 0))
zph = zph->zpool_next;
return (zph);
}
/*
* Returns a handle to the pool that contains the provided dataset.
* If a handle to that pool already exists then that handle is returned.
* Otherwise, a new handle is created and added to the list of handles.
*/
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
char *pool_name;
int len;
zpool_handle_t *zph;
len = strcspn(zhp->zfs_name, "/@#") + 1;
pool_name = zfs_alloc(zhp->zfs_hdl, len);
(void) strlcpy(pool_name, zhp->zfs_name, len);
zph = zpool_find_handle(zhp, pool_name, len);
if (zph == NULL)
zph = zpool_add_handle(zhp, pool_name);
free(pool_name);
return (zph);
}
void
zpool_free_handles(libzfs_handle_t *hdl)
{
zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
while (zph != NULL) {
next = zph->zpool_next;
zpool_close(zph);
zph = next;
}
hdl->libzfs_pool_handles = NULL;
}
/*
* Utility function to gather stats (objset and zpl) for the given object.
*/
static int
get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, zc) != 0) {
if (errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, zc);
else
return (-1);
}
return (0);
}
/*
* Utility function to get the received properties of the given object.
*/
static int
get_recvd_props_ioctl(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *recvdprops;
zfs_cmd_t zc = {"\0"};
int err;
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
if (errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
zcmd_free_nvlists(&zc);
if (err != 0)
return (-1);
nvlist_free(zhp->zfs_recvd_props);
zhp->zfs_recvd_props = recvdprops;
return (0);
}
static int
put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
nvlist_t *allprops, *userprops;
zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
return (-1);
}
/*
* XXX Why do we store the user props separately, in addition to
* storing them in zfs_props?
*/
if ((userprops = process_user_props(zhp, allprops)) == NULL) {
nvlist_free(allprops);
return (-1);
}
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
zhp->zfs_props = allprops;
zhp->zfs_user_props = userprops;
return (0);
}
static int
get_stats(zfs_handle_t *zhp)
{
int rc = 0;
zfs_cmd_t zc = {"\0"};
zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0);
if (get_stats_ioctl(zhp, &zc) != 0)
rc = -1;
else if (put_stats_zhdl(zhp, &zc) != 0)
rc = -1;
zcmd_free_nvlists(&zc);
return (rc);
}
/*
* Refresh the properties currently stored in the handle.
*/
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
(void) get_stats(zhp);
}
/*
* Makes a handle from the given dataset name. Used by zfs_open() and
* zfs_iter_* to create child handles on the fly.
*/
static int
make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
if (put_stats_zhdl(zhp, zc) != 0)
return (-1);
/*
* We've managed to open the dataset and gather statistics. Determine
* the high-level type.
*/
if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) {
zhp->zfs_head_type = ZFS_TYPE_VOLUME;
} else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) {
zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
} else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER) {
errno = EINVAL;
return (-1);
} else if (zhp->zfs_dmustats.dds_inconsistent) {
errno = EBUSY;
return (-1);
} else {
abort();
}
if (zhp->zfs_dmustats.dds_is_snapshot)
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
else
abort(); /* we should never see any other types */
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
return (-1);
return (0);
}
zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
if (get_stats_ioctl(zhp, &zc) == -1) {
zcmd_free_nvlists(&zc);
free(zhp);
return (NULL);
}
if (make_dataset_handle_common(zhp, &zc) == -1) {
free(zhp);
zhp = NULL;
}
zcmd_free_nvlists(&zc);
return (zhp);
}
zfs_handle_t *
make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
if (make_dataset_handle_common(zhp, zc) == -1) {
free(zhp);
return (NULL);
}
return (zhp);
}
zfs_handle_t *
make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = pzhp->zfs_hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
zhp->zfs_head_type = pzhp->zfs_type;
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
zhp->zpool_hdl = zpool_handle(zhp);
if (zc->zc_objset_stats.dds_creation_txg != 0) {
/* structure assignment */
zhp->zfs_dmustats = zc->zc_objset_stats;
} else {
if (get_stats_ioctl(zhp, zc) == -1) {
zcmd_free_nvlists(zc);
free(zhp);
return (NULL);
}
if (make_dataset_handle_common(zhp, zc) == -1) {
zcmd_free_nvlists(zc);
free(zhp);
return (NULL);
}
}
if (zhp->zfs_dmustats.dds_is_snapshot ||
strchr(zc->zc_name, '@') != NULL)
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
return (zhp);
}
zfs_handle_t *
zfs_handle_dup(zfs_handle_t *zhp_orig)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = zhp_orig->zfs_hdl;
zhp->zpool_hdl = zhp_orig->zpool_hdl;
(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
sizeof (zhp->zfs_name));
zhp->zfs_type = zhp_orig->zfs_type;
zhp->zfs_head_type = zhp_orig->zfs_head_type;
zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
if (zhp_orig->zfs_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_user_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_user_props,
&zhp->zfs_user_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_recvd_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_recvd_props,
&zhp->zfs_recvd_props, 0)) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
if (zhp_orig->zfs_mntopts != NULL) {
zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
zhp_orig->zfs_mntopts);
}
zhp->zfs_props_table = zhp_orig->zfs_props_table;
return (zhp);
}
boolean_t
zfs_bookmark_exists(const char *path)
{
nvlist_t *bmarks;
nvlist_t *props;
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *bmark_name;
char *pound;
int err;
boolean_t rv;
(void) strlcpy(fsname, path, sizeof (fsname));
pound = strchr(fsname, '#');
if (pound == NULL)
return (B_FALSE);
*pound = '\0';
bmark_name = pound + 1;
props = fnvlist_alloc();
err = lzc_get_bookmarks(fsname, props, &bmarks);
nvlist_free(props);
if (err != 0) {
nvlist_free(bmarks);
return (B_FALSE);
}
rv = nvlist_exists(bmarks, bmark_name);
nvlist_free(bmarks);
return (rv);
}
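/*
 * Usage sketch for the helper above; the bookmark name is hypothetical
 * and linking against libzfs/libzfs_core is assumed.
 */
#include <stdio.h>
#include <libzfs.h>

int
main(void)
{
	const char *bmark = "tank/home#before-upgrade";

	printf("%s %s\n", bmark,
	    zfs_bookmark_exists(bmark) ? "exists" : "does not exist");
	return (0);
}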
zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
nvlist_t *bmark_props)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
/* Fill in the name. */
zhp->zfs_hdl = parent->zfs_hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
/* Set the property lists. */
if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
free(zhp);
return (NULL);
}
/* Set the types. */
zhp->zfs_head_type = parent->zfs_head_type;
zhp->zfs_type = ZFS_TYPE_BOOKMARK;
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
nvlist_free(zhp->zfs_props);
free(zhp);
return (NULL);
}
return (zhp);
}
struct zfs_open_bookmarks_cb_data {
const char *path;
zfs_handle_t *zhp;
};
static int
zfs_open_bookmarks_cb(zfs_handle_t *zhp, void *data)
{
struct zfs_open_bookmarks_cb_data *dp = data;
/*
* Is it the one we are looking for?
*/
if (strcmp(dp->path, zfs_get_name(zhp)) == 0) {
/*
* We found it. Save it and let the caller know we are done.
*/
dp->zhp = zhp;
return (EEXIST);
}
/*
* Not found. Close the handle and ask for another one.
*/
zfs_close(zhp);
return (0);
}
/*
* Opens the given snapshot, bookmark, filesystem, or volume. The 'types'
* argument is a mask of acceptable types. The function will print an
* appropriate error message and return NULL if it can't be opened.
*/
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
zfs_handle_t *zhp;
char errbuf[ERRBUFLEN];
char *bookp;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
/*
* Validate the name before we even try to open it.
*/
if (!zfs_validate_name(hdl, path, types, B_FALSE)) {
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
errno = EINVAL;
return (NULL);
}
/*
* Bookmarks need to be handled separately.
*/
bookp = strchr(path, '#');
if (bookp == NULL) {
/*
* Try to get stats for the dataset, which will tell us if it
* exists.
*/
errno = 0;
if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
} else {
char dsname[ZFS_MAX_DATASET_NAME_LEN];
zfs_handle_t *pzhp;
struct zfs_open_bookmarks_cb_data cb_data = {path, NULL};
/*
* We need to cut out '#' and everything after '#'
* to get the parent dataset name only.
*/
assert(bookp - path < sizeof (dsname));
(void) strlcpy(dsname, path,
MIN(sizeof (dsname), bookp - path + 1));
/*
* Create handle for the parent dataset.
*/
errno = 0;
if ((pzhp = make_dataset_handle(hdl, dsname)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
/*
* Iterate bookmarks to find the right one.
*/
errno = 0;
if ((zfs_iter_bookmarks_v2(pzhp, 0, zfs_open_bookmarks_cb,
&cb_data) == 0) && (cb_data.zhp == NULL)) {
(void) zfs_error(hdl, EZFS_NOENT, errbuf);
zfs_close(pzhp);
errno = ENOENT;
return (NULL);
}
if (cb_data.zhp == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
zfs_close(pzhp);
return (NULL);
}
zhp = cb_data.zhp;
/*
* Cleanup.
*/
zfs_close(pzhp);
}
if (!(types & zhp->zfs_type)) {
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
errno = EINVAL;
return (NULL);
}
return (zhp);
}
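/*
 * A minimal open/close round trip using the function above.  Error
 * handling is abbreviated and the dataset name is made up; libzfs_init()
 * and libzfs_fini() bracket all libzfs usage.
 */
#include <stdio.h>
#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	if (hdl == NULL)
		return (1);

	zfs_handle_t *zhp = zfs_open(hdl, "tank/home",
	    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
	if (zhp != NULL) {
		printf("opened %s (type %s)\n", zfs_get_name(zhp),
		    zfs_type_to_name(zfs_get_type(zhp)));
		zfs_close(zhp);
	}

	libzfs_fini(hdl);
	return (0);
}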
/*
* Release a ZFS handle. Nothing to do but free the associated memory.
*/
void
zfs_close(zfs_handle_t *zhp)
{
if (zhp->zfs_mntopts)
free(zhp->zfs_mntopts);
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
nvlist_free(zhp->zfs_recvd_props);
free(zhp);
}
typedef struct mnttab_node {
struct mnttab mtn_mt;
avl_node_t mtn_node;
} mnttab_node_t;
static int
libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
{
const mnttab_node_t *mtn1 = (const mnttab_node_t *)arg1;
const mnttab_node_t *mtn2 = (const mnttab_node_t *)arg2;
int rv;
rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);
return (TREE_ISIGN(rv));
}
void
libzfs_mnttab_init(libzfs_handle_t *hdl)
{
pthread_mutex_init(&hdl->libzfs_mnttab_cache_lock, NULL);
assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}
static int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
FILE *mnttab;
struct mnttab entry;
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
while (getmntent(mnttab, &entry) == 0) {
mnttab_node_t *mtn;
avl_index_t where;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);
/* Exclude duplicate mounts */
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
continue;
}
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
(void) fclose(mnttab);
return (0);
}
void
libzfs_mnttab_fini(libzfs_handle_t *hdl)
{
void *cookie = NULL;
mnttab_node_t *mtn;
while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))
!= NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
}
avl_destroy(&hdl->libzfs_mnttab_cache);
(void) pthread_mutex_destroy(&hdl->libzfs_mnttab_cache_lock);
}
void
libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
{
hdl->libzfs_mnttab_enable = enable;
}
int
libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
struct mnttab *entry)
{
FILE *mnttab;
mnttab_node_t find;
mnttab_node_t *mtn;
int ret = ENOENT;
if (!hdl->libzfs_mnttab_enable) {
struct mnttab srch = { 0 };
if (avl_numnodes(&hdl->libzfs_mnttab_cache))
libzfs_mnttab_fini(hdl);
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
srch.mnt_special = (char *)fsname;
srch.mnt_fstype = (char *)MNTTYPE_ZFS;
ret = getmntany(mnttab, entry, &srch) ? ENOENT : 0;
(void) fclose(mnttab);
return (ret);
}
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) {
int error;
if ((error = libzfs_mnttab_update(hdl)) != 0) {
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
return (error);
}
}
find.mtn_mt.mnt_special = (char *)fsname;
mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
if (mtn) {
*entry = mtn->mtn_mt;
ret = 0;
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
return (ret);
}
void
libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
const char *mountp, const char *mntopts)
{
mnttab_node_t *mtn;
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
if (avl_numnodes(&hdl->libzfs_mnttab_cache) != 0) {
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
/*
* Another thread may have already added this entry
* via libzfs_mnttab_update. If so, we should skip it.
*/
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, NULL) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
} else {
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}
void
libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
{
mnttab_node_t find;
mnttab_node_t *ret;
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
find.mtn_mt.mnt_special = (char *)fsname;
if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))
!= NULL) {
avl_remove(&hdl->libzfs_mnttab_cache, ret);
free(ret->mtn_mt.mnt_special);
free(ret->mtn_mt.mnt_mountp);
free(ret->mtn_mt.mnt_fstype);
free(ret->mtn_mt.mnt_mntopts);
free(ret);
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}
int
zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
zpool_handle_t *zpool_handle = zhp->zpool_hdl;
if (zpool_handle == NULL)
return (-1);
*spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
return (0);
}
/*
* The choice of reservation property depends on the SPA version.
*/
static int
zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
if (spa_version >= SPA_VERSION_REFRESERVATION)
*resv_prop = ZFS_PROP_REFRESERVATION;
else
*resv_prop = ZFS_PROP_RESERVATION;
return (0);
}
/*
* Given an nvlist of properties to set, validates that they are correct, and
* parses any numeric properties (index, boolean, etc) if they are specified as
* strings.
*/
nvlist_t *
zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl,
boolean_t key_params_ok, const char *errbuf)
{
nvpair_t *elem;
uint64_t intval;
const char *strval;
zfs_prop_t prop;
nvlist_t *ret;
int chosen_normal = -1;
int chosen_utf = -1;
int set_maxbs = 0;
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
/*
* Make sure this property is valid and applies to this type.
*/
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
if (prop == ZPROP_USERPROP && zfs_prop_user(propname)) {
/*
* This is a user property: make sure it's a
* string, and that its name is shorter than ZAP_MAXNAMELEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (nvlist_add_string(ret, propname, strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Currently, only user properties can be modified on
* snapshots.
*/
if (type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"this property can not be modified for snapshots"));
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (prop == ZPROP_USERPROP && zfs_prop_userquota(propname)) {
zfs_userquota_prop_t uqtype;
char *newpropname = NULL;
char domain[128];
uint64_t rid;
uint64_t valary[3];
int rc;
if (userquota_propname_decode(propname, zoned,
&uqtype, domain, sizeof (domain), &rid) != 0) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' has an invalid user/group name"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (uqtype != ZFS_PROP_USERQUOTA &&
uqtype != ZFS_PROP_GROUPQUOTA &&
uqtype != ZFS_PROP_USEROBJQUOTA &&
uqtype != ZFS_PROP_GROUPOBJQUOTA &&
uqtype != ZFS_PROP_PROJECTQUOTA &&
uqtype != ZFS_PROP_PROJECTOBJQUOTA) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, "none") == 0) {
intval = 0;
} else if (zfs_nicestrtonum(hdl,
strval, &intval) != 0) {
(void) zfs_error(hdl,
EZFS_BADPROP, errbuf);
goto error;
}
} else if (nvpair_type(elem) ==
DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, &intval);
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable "
"{user|group|project}quota"));
goto error;
}
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* Encode the prop name as
* userquota@<hex-rid>-domain, to make it easy
* for the kernel to decode.
*/
rc = asprintf(&newpropname, "%s%llx-%s",
zfs_userquota_prop_prefixes[uqtype],
(longlong_t)rid, domain);
if (rc == -1 || newpropname == NULL) {
(void) no_memory(hdl);
goto error;
}
valary[0] = uqtype;
valary[1] = rid;
valary[2] = intval;
if (nvlist_add_uint64_array(ret, newpropname,
valary, 3) != 0) {
free(newpropname);
(void) no_memory(hdl);
goto error;
}
free(newpropname);
continue;
} else if (prop == ZPROP_USERPROP &&
zfs_prop_written(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (prop == ZPROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' does not "
"apply to datasets of this type"), propname);
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (zfs_prop_readonly(prop) &&
!(zfs_prop_setonce(prop) && zhp == NULL) &&
!(zfs_prop_encryption_key_param(prop) && key_params_ok)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, type, ret,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform some additional checks for specific properties.
*/
switch (prop) {
case ZFS_PROP_VERSION:
{
int version;
if (zhp == NULL)
break;
version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (intval < version) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can not downgrade; already at version %u"),
version);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
{
int maxbs = SPA_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
}
/*
* The value must be a power of two between
* SPA_MINBLOCKSIZE and maxbs.
*/
if (intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval)) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be power of 2 from 512B "
"to %s"), propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/* save the ZFS_PROP_RECORDSIZE during create op */
if (zpool_hdl == NULL && prop == ZFS_PROP_RECORDSIZE) {
set_maxbs = intval;
}
break;
}
case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
{
int maxbs =
set_maxbs == 0 ? SPA_OLD_MAXBLOCKSIZE : set_maxbs;
char buf[64];
if (zpool_hdl != NULL) {
char state[64] = "";
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
/*
* Issue a warning but do not fail so that
* tests for settable properties succeed.
*/
if (zpool_prop_get_feature(zpool_hdl,
"feature@allocation_classes", state,
sizeof (state)) != 0 ||
strcmp(state, ZFS_FEATURE_ACTIVE) != 0) {
(void) fprintf(stderr, gettext(
"%s: property requires a special "
"device in the pool\n"), propname);
}
}
if (intval != 0 &&
(intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval))) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid '%s=%llu' property: must be zero "
"or a power of 2 from 512B to %s"),
propname, (unsigned long long)intval, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
/*
* Verify the mlslabel string and convert to
* internal hex label string.
*/
m_label_t *new_sl;
char *hex = NULL; /* internal label string */
/* Default value is already OK. */
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
break;
/* Verify the label can be converted to binary form */
if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
(str_to_label(strval, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1)) {
goto badlabel;
}
/* Now translate to hex internal label string */
if (label_to_str(new_sl, &hex, M_INTERNAL,
DEF_NAMES) != 0) {
if (hex)
free(hex);
goto badlabel;
}
m_label_free(new_sl);
/* If string is already in internal form, we're done. */
if (strcmp(strval, hex) == 0) {
free(hex);
break;
}
/* Replace the label string with the internal form. */
(void) nvlist_remove(ret, zfs_prop_to_name(prop),
DATA_TYPE_STRING);
fnvlist_add_string(ret, zfs_prop_to_name(prop), hex);
free(hex);
break;
badlabel:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid mlslabel '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
m_label_free(new_sl); /* OK if null */
goto error;
#else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mlslabels are unsupported"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
#endif /* HAVE_MLSLABEL */
}
case ZFS_PROP_MOUNTPOINT:
{
namecheck_err_t why;
if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
break;
if (mountpoint_namecheck(strval, &why)) {
switch (why) {
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' must be an absolute path, "
"'none', or 'legacy'"), propname);
break;
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"component of '%s' is too long"),
propname);
break;
default:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"(%d) not defined"),
why);
break;
}
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
zfs_fallthrough;
}
case ZFS_PROP_SHARESMB:
case ZFS_PROP_SHARENFS:
/*
* For the mountpoint and sharenfs or sharesmb
* properties, check if it can be set in a
* global/non-global zone based on
* the zoned property value:
*
* global zone non-global zone
* --------------------------------------------------
* zoned=on mountpoint (no) mountpoint (yes)
* sharenfs (no) sharenfs (no)
* sharesmb (no) sharesmb (no)
*
* zoned=off mountpoint (yes) N/A
* sharenfs (yes)
* sharesmb (yes)
*/
if (zoned) {
if (getzoneid() == GLOBAL_ZONEID) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set on "
"dataset in a non-global zone"),
propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
} else if (prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set in "
"a non-global zone"), propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
}
} else if (getzoneid() != GLOBAL_ZONEID) {
/*
* If the zoned property is 'off', this must be in
* a global zone. If not, something is wrong.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set while dataset "
"'zoned' property is set"), propname);
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
/*
* At this point, it is legitimate to set the
* property. Now we want to make sure that the
* property value is valid if it is sharenfs.
*/
if ((prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) &&
strcmp(strval, "on") != 0 &&
strcmp(strval, "off") != 0) {
enum sa_protocol proto;
if (prop == ZFS_PROP_SHARESMB)
proto = SA_PROTOCOL_SMB;
else
proto = SA_PROTOCOL_NFS;
if (sa_validate_shareopts(strval, proto) !=
SA_OK) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set to invalid "
"options"), propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_KEYLOCATION:
if (!zfs_prop_valid_keylocation(strval, B_FALSE)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid keylocation"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zhp != NULL) {
uint64_t crypt =
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF &&
strcmp(strval, "none") != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must be 'none' "
"for unencrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
} else if (crypt != ZIO_CRYPT_OFF &&
strcmp(strval, "none") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must not be 'none' "
"for encrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_PBKDF2_ITERS:
if (intval < MIN_PBKDF2_ITERATIONS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"minimum pbkdf2 iterations is %u"),
MIN_PBKDF2_ITERATIONS);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZFS_PROP_UTF8ONLY:
chosen_utf = (int)intval;
break;
case ZFS_PROP_NORMALIZE:
chosen_normal = (int)intval;
break;
default:
break;
}
/*
* For changes to existing volumes, we have some additional
* checks to enforce.
*/
if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
uint64_t blocksize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLBLOCKSIZE);
char buf[64];
switch (prop) {
case ZFS_PROP_VOLSIZE:
if (intval % blocksize != 0) {
zfs_nicebytes(blocksize, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a multiple of "
"volume block size (%s)"),
propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be zero"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
/* check encryption properties */
if (zhp != NULL) {
int64_t crypt = zfs_prop_get_int(zhp,
ZFS_PROP_ENCRYPTION);
switch (prop) {
case ZFS_PROP_COPIES:
if (crypt != ZIO_CRYPT_OFF && intval > 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encrypted datasets cannot have "
"3 copies"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
}
/*
* If normalization was chosen, but no UTF8 choice was made,
* enforce rejection of non-UTF8 names.
*
* If normalization was chosen, but rejecting non-UTF8 names
* was explicitly not chosen, it is an error.
*
* If utf8only was turned off, but the parent has normalization,
* turn off normalization.
*/
if (chosen_normal > 0 && chosen_utf < 0) {
if (nvlist_add_uint64(ret,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
(void) no_memory(hdl);
goto error;
}
} else if (chosen_normal > 0 && chosen_utf == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be set 'on' if normalization chosen"),
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
} else if (chosen_normal < 0 && chosen_utf == 0) {
if (nvlist_add_uint64(ret,
zfs_prop_to_name(ZFS_PROP_NORMALIZE), 0) != 0) {
(void) no_memory(hdl);
goto error;
}
}
return (ret);
error:
nvlist_free(ret);
return (NULL);
}
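/*
 * Synthesize a (ref)reservation that tracks a new volsize, provided the
 * existing reservation was tracking the old volsize. Returns -1 on error,
 * 0 if no reservation was added, and 1 if one was added to 'nvl' (the same
 * convention used by zfs_fix_auto_resv() below).
 */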
static int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t old_volsize;
uint64_t new_volsize;
uint64_t old_reservation;
uint64_t new_reservation;
zfs_prop_t resv_prop;
nvlist_t *props;
zpool_handle_t *zph = zpool_handle(zhp);
/*
* If this is an existing volume, and someone is setting the volsize,
* make sure that it matches the reservation, or add it if necessary.
*/
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_reservation = zfs_prop_get_int(zhp, resv_prop);
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
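/*
 * If the current reservation does not track the old volsize (i.e. it was
 * set by hand), or the caller is setting the reservation explicitly,
 * leave it alone.
 */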
if ((zvol_volsize_to_reservation(zph, old_volsize, props) !=
old_reservation) || nvlist_exists(nvl,
zfs_prop_to_name(resv_prop))) {
fnvlist_free(props);
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&new_volsize) != 0) {
fnvlist_free(props);
return (-1);
}
new_reservation = zvol_volsize_to_reservation(zph, new_volsize, props);
fnvlist_free(props);
if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop),
new_reservation) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
/*
* Helper for 'zfs {set|clone} refreservation=auto'. Must be called after
* zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value.
* Return codes must match zfs_add_synthetic_resv().
*/
static int
zfs_fix_auto_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t volsize;
uint64_t resvsize;
zfs_prop_t prop;
nvlist_t *props;
if (!ZFS_IS_VOLUME(zhp)) {
return (0);
}
if (zfs_which_resv_prop(zhp, &prop) != 0) {
return (-1);
}
if (prop != ZFS_PROP_REFRESERVATION) {
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(prop), &resvsize) != 0) {
/* No value being set, so it can't be "auto" */
return (0);
}
if (resvsize != UINT64_MAX) {
/* Being set to a value other than "auto" */
return (0);
}
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&volsize) != 0) {
volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
}
resvsize = zvol_volsize_to_reservation(zpool_handle(zhp), volsize,
props);
fnvlist_free(props);
(void) nvlist_remove_all(nvl, zfs_prop_to_name(prop));
if (nvlist_add_uint64(nvl, zfs_prop_to_name(prop), resvsize) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_SETUID:
case ZFS_PROP_READONLY:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
return (B_TRUE);
default:
return (B_FALSE);
}
}
/*
* Given a property name and value, set the property for the given dataset.
*/
int
zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
{
int ret = -1;
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl = NULL;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_string(nvl, propname, propval) != 0) {
(void) no_memory(hdl);
goto error;
}
ret = zfs_prop_set_list(zhp, nvl);
error:
nvlist_free(nvl);
return (ret);
}
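/*
 * Illustrative usage of zfs_prop_set() (not part of this change; the dataset
 * name is made up):
 *
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL) {
 *		if (zfs_prop_set(zhp, "compression", "lz4") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zfs_close(zhp);
 *	}
 */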
/*
* Given an nvlist of property names and values, set the properties for the
* given dataset.
*/
int
zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props)
{
return (zfs_prop_set_list_flags(zhp, props, 0));
}
/*
* Given an nvlist of property names, values and flags, set the properties
* for the given dataset. If ZFS_SET_NOMOUNT is set, the mountpoint, sharenfs
* and sharesmb properties can be updated without (un/re)mounting and
* (un/re)sharing the dataset.
*/
int
zfs_prop_set_list_flags(zfs_handle_t *zhp, nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
prop_changelist_t **cls = NULL;
int cl_idx;
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl;
int nvl_len = 0;
int added_resv = 0;
zfs_prop_t prop;
boolean_t nsprop = B_FALSE;
nvpair_t *elem;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl,
B_FALSE, errbuf)) == NULL)
goto error;
/*
* We have to check for any extra properties which need to be added
* before computing the length of the nvlist.
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE &&
(added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) {
goto error;
}
}
if (added_resv != 1 &&
(added_resv = zfs_fix_auto_resv(zhp, nvl)) == -1) {
goto error;
}
/*
* Check how many properties we're setting and allocate an array to
* store changelist pointers for postfix().
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem))
nvl_len++;
if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL)
goto error;
cl_idx = 0;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
nsprop |= zfs_is_namespace_prop(prop);
assert(cl_idx < nvl_len);
/*
* We don't want to unmount & remount the dataset when changing
* its canmount property to 'on' or 'noauto'. We only use
* the changelist logic to unmount when setting canmount=off.
*/
if (prop != ZFS_PROP_CANMOUNT ||
(fnvpair_value_uint64(elem) == ZFS_CANMOUNT_OFF &&
zfs_is_mounted(zhp, NULL))) {
cls[cl_idx] = changelist_gather(zhp, prop,
((flags & ZFS_SET_NOMOUNT) ?
CL_GATHER_DONT_UNMOUNT : 0), 0);
if (cls[cl_idx] == NULL)
goto error;
}
if (prop == ZFS_PROP_MOUNTPOINT &&
changelist_haszonedchild(cls[cl_idx])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if (cls[cl_idx] != NULL &&
(ret = changelist_prefix(cls[cl_idx])) != 0)
goto error;
cl_idx++;
}
assert(cl_idx == nvl_len);
/*
* Execute the corresponding ioctl() to set this list of properties.
*/
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zcmd_write_src_nvlist(hdl, &zc, nvl);
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
if (ret != 0) {
if (zc.zc_nvlist_dst_filled == B_FALSE) {
(void) zfs_standard_error(hdl, errno, errbuf);
goto error;
}
/* Get the list of unset properties back and report them. */
nvlist_t *errorprops = NULL;
if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0)
goto error;
for (nvpair_t *elem = nvlist_next_nvpair(errorprops, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errorprops, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
zfs_setprop_error(hdl, prop, errno, errbuf);
}
nvlist_free(errorprops);
if (added_resv && errno == ENOSPC) {
/* clean up the volsize property we tried to set */
uint64_t old_volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
nvlist_free(nvl);
nvl = NULL;
zcmd_free_nvlists(&zc);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
goto error;
if (nvlist_add_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_VOLSIZE),
old_volsize) != 0)
goto error;
zcmd_write_src_nvlist(hdl, &zc, nvl);
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
}
} else {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL) {
int clp_err = changelist_postfix(cls[cl_idx]);
if (clp_err != 0)
ret = clp_err;
}
}
if (ret == 0) {
/*
* Refresh the statistics so the new property
* value is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (nsprop && zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
}
error:
nvlist_free(nvl);
zcmd_free_nvlists(&zc);
if (cls != NULL) {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL)
changelist_free(cls[cl_idx]);
}
free(cls);
}
return (ret);
}
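/*
 * Illustrative usage of zfs_prop_set_list_flags() (not part of this change):
 * update sharenfs without triggering an unshare/re-share or unmount/remount
 * cycle.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props, "sharenfs", "on");
 *	int err = zfs_prop_set_list_flags(zhp, props, ZFS_SET_NOMOUNT);
 *	fnvlist_free(props);
 */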
/*
* Given a property, inherit the value from the parent dataset, or if received
* is TRUE, revert to the received value, if any.
*/
int
zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
{
zfs_cmd_t zc = {"\0"};
int ret;
prop_changelist_t *cl;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[ERRBUFLEN];
zfs_prop_t prop;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot inherit %s for '%s'"), propname, zhp->zfs_name);
zc.zc_cookie = received;
if ((prop = zfs_name_to_prop(propname)) == ZPROP_USERPROP) {
/*
* For user properties, the amount of work we have to do is very
* small, so just do it here.
*/
if (!zfs_prop_user(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
return (zfs_standard_error(hdl, errno, errbuf));
(void) get_stats(zhp);
return (0);
}
/*
* Verify that this property is inheritable.
*/
if (zfs_prop_readonly(prop))
return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
if (!zfs_prop_inheritable(prop) && !received)
return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
/*
* Check to see if the value applies to this type
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
/*
* Normalize the name, to get rid of shorthand abbreviations.
*/
propname = zfs_prop_to_name(prop);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Determine datasets which will be affected by this change, if any.
*/
if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
return (-1);
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0) {
changelist_free(cl);
return (zfs_standard_error(hdl, errno, errbuf));
} else {
if ((ret = changelist_postfix(cl)) != 0)
goto error;
/*
* Refresh the statistics so the new property is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
error:
changelist_free(cl);
return (ret);
}
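/*
 * Illustrative usage of zfs_prop_inherit() (not part of this change): clear a
 * local setting so the dataset picks the value back up from its parent.
 *
 *	if (zfs_prop_inherit(zhp, "compression", B_FALSE) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */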
/*
* True DSL properties are stored in an nvlist. The following two functions
* extract them appropriately.
*/
uint64_t
getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, const char **source)
{
nvlist_t *nv;
uint64_t value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_numeric(prop);
*source = "";
}
return (value);
}
static const char *
getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, const char **source)
{
nvlist_t *nv;
const char *value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_string(prop);
*source = "";
}
return (value);
}
static boolean_t
zfs_is_recvd_props_mode(zfs_handle_t *zhp)
{
return (zhp->zfs_props != NULL &&
zhp->zfs_props == zhp->zfs_recvd_props);
}
static void
zfs_set_recvd_props_mode(zfs_handle_t *zhp, uintptr_t *cookie)
{
*cookie = (uintptr_t)zhp->zfs_props;
zhp->zfs_props = zhp->zfs_recvd_props;
}
static void
zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uintptr_t *cookie)
{
zhp->zfs_props = (nvlist_t *)*cookie;
*cookie = 0;
}
/*
* Internal function for getting a numeric property. Both zfs_prop_get() and
* zfs_prop_get_int() are built using this interface.
*
* Certain properties can be overridden using 'mount -o'. In this case, scan
* the contents of the /proc/self/mounts entry, searching for the
* appropriate options. If they differ from the on-disk values, report the
* current values and mark the source "temporary".
*/
static int
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
const char **source, uint64_t *val)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zplprops = NULL;
struct mnttab mnt;
const char *mntopt_on = NULL;
const char *mntopt_off = NULL;
boolean_t received = zfs_is_recvd_props_mode(zhp);
*source = NULL;
/*
* If the property is being fetched for a snapshot, check whether
* the property is valid for the snapshot's head dataset type.
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
}
switch (prop) {
case ZFS_PROP_ATIME:
mntopt_on = MNTOPT_ATIME;
mntopt_off = MNTOPT_NOATIME;
break;
case ZFS_PROP_RELATIME:
mntopt_on = MNTOPT_RELATIME;
mntopt_off = MNTOPT_NORELATIME;
break;
case ZFS_PROP_DEVICES:
mntopt_on = MNTOPT_DEVICES;
mntopt_off = MNTOPT_NODEVICES;
break;
case ZFS_PROP_EXEC:
mntopt_on = MNTOPT_EXEC;
mntopt_off = MNTOPT_NOEXEC;
break;
case ZFS_PROP_READONLY:
mntopt_on = MNTOPT_RO;
mntopt_off = MNTOPT_RW;
break;
case ZFS_PROP_SETUID:
mntopt_on = MNTOPT_SETUID;
mntopt_off = MNTOPT_NOSETUID;
break;
case ZFS_PROP_XATTR:
mntopt_on = MNTOPT_XATTR;
mntopt_off = MNTOPT_NOXATTR;
break;
case ZFS_PROP_NBMAND:
mntopt_on = MNTOPT_NBMAND;
mntopt_off = MNTOPT_NONBMAND;
break;
default:
break;
}
/*
* Because looking up the mount options is potentially expensive
* (iterating over all of /proc/self/mounts), we defer its
* calculation until we're looking up a property which requires
* its presence.
*/
if (!zhp->zfs_mntcheck &&
(mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)
zhp->zfs_mntopts = zfs_strdup(hdl,
entry.mnt_mntopts);
zhp->zfs_mntcheck = B_TRUE;
}
if (zhp->zfs_mntopts == NULL)
mnt.mnt_mntopts = (char *)"";
else
mnt.mnt_mntopts = zhp->zfs_mntopts;
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_READONLY:
case ZFS_PROP_SETUID:
#ifndef __FreeBSD__
case ZFS_PROP_XATTR:
#endif
case ZFS_PROP_NBMAND:
*val = getprop_uint64(zhp, prop, source);
if (received)
break;
if (hasmntopt(&mnt, mntopt_on) && !*val) {
*val = B_TRUE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
} else if (hasmntopt(&mnt, mntopt_off) && *val) {
*val = B_FALSE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
}
break;
case ZFS_PROP_CANMOUNT:
case ZFS_PROP_VOLSIZE:
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
*val = getprop_uint64(zhp, prop, source);
if (*source == NULL) {
/* not default, must be local */
*source = zhp->zfs_name;
}
break;
case ZFS_PROP_MOUNTED:
*val = (zhp->zfs_mntopts != NULL);
break;
case ZFS_PROP_NUMCLONES:
*val = zhp->zfs_dmustats.dds_num_clones;
break;
case ZFS_PROP_VERSION:
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
case ZFS_PROP_CASE:
zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
zcmd_free_nvlists(&zc);
if (prop == ZFS_PROP_VERSION &&
zhp->zfs_type == ZFS_TYPE_VOLUME)
*val = zfs_prop_default_numeric(prop);
return (-1);
}
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
val) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
nvlist_free(zplprops);
zcmd_free_nvlists(&zc);
break;
case ZFS_PROP_INCONSISTENT:
*val = zhp->zfs_dmustats.dds_inconsistent;
break;
case ZFS_PROP_REDACTED:
*val = zhp->zfs_dmustats.dds_redacted;
break;
case ZFS_PROP_GUID:
if (zhp->zfs_dmustats.dds_guid != 0)
*val = zhp->zfs_dmustats.dds_guid;
else
*val = getprop_uint64(zhp, prop, source);
break;
case ZFS_PROP_CREATETXG:
/*
* We can read the createtxg property directly from the zfs
* handle for filesystem, snapshot and ZVOL types.
*/
if (((zhp->zfs_type == ZFS_TYPE_FILESYSTEM) ||
(zhp->zfs_type == ZFS_TYPE_SNAPSHOT) ||
(zhp->zfs_type == ZFS_TYPE_VOLUME)) &&
(zhp->zfs_dmustats.dds_creation_txg != 0)) {
*val = zhp->zfs_dmustats.dds_creation_txg;
break;
} else {
*val = getprop_uint64(zhp, prop, source);
}
zfs_fallthrough;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
*val = getprop_uint64(zhp, prop, source);
/*
* If we tried to use a default value for a
* readonly property, it means that it was not
* present. Note this only applies to "truly"
* readonly properties, not set-once properties
* like volblocksize.
*/
if (zfs_prop_readonly(prop) &&
!zfs_prop_setonce(prop) &&
*source != NULL && (*source)[0] == '\0') {
*source = NULL;
return (-1);
}
break;
case PROP_TYPE_STRING:
default:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"cannot get non-numeric property"));
return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "internal error")));
}
}
return (0);
}
/*
* Calculate the source type, given the raw source string.
*/
static void
get_source(zfs_handle_t *zhp, zprop_source_t *srctype, const char *source,
char *statbuf, size_t statlen)
{
if (statbuf == NULL ||
srctype == NULL || *srctype == ZPROP_SRC_TEMPORARY) {
return;
}
if (source == NULL) {
*srctype = ZPROP_SRC_NONE;
} else if (source[0] == '\0') {
*srctype = ZPROP_SRC_DEFAULT;
} else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
*srctype = ZPROP_SRC_RECEIVED;
} else {
if (strcmp(source, zhp->zfs_name) == 0) {
*srctype = ZPROP_SRC_LOCAL;
} else {
(void) strlcpy(statbuf, source, statlen);
*srctype = ZPROP_SRC_INHERITED;
}
}
}
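/*
 * Descriptive summary of the mapping above:
 *	source == NULL                         -> ZPROP_SRC_NONE
 *	source == ""                           -> ZPROP_SRC_DEFAULT
 *	source contains ZPROP_SOURCE_VAL_RECVD -> ZPROP_SRC_RECEIVED
 *	source == dataset name                 -> ZPROP_SRC_LOCAL
 *	anything else                          -> ZPROP_SRC_INHERITED
 *	                                          (source copied into statbuf)
 */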
int
zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
size_t proplen, boolean_t literal)
{
zfs_prop_t prop;
int err = 0;
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (-1);
prop = zfs_name_to_prop(propname);
if (prop != ZPROP_USERPROP) {
uintptr_t cookie;
if (!nvlist_exists(zhp->zfs_recvd_props, propname))
return (-1);
zfs_set_recvd_props_mode(zhp, &cookie);
err = zfs_prop_get(zhp, prop, propbuf, proplen,
NULL, NULL, 0, literal);
zfs_unset_recvd_props_mode(zhp, &cookie);
} else {
nvlist_t *propval;
const char *recvdval;
if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
propname, &propval) != 0)
return (-1);
recvdval = fnvlist_lookup_string(propval, ZPROP_VALUE);
(void) strlcpy(propbuf, recvdval, proplen);
}
return (err == 0 ? 0 : -1);
}
static int
get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
nvpair_t *pair;
value = zfs_get_clones_nvl(zhp);
if (value == NULL || nvlist_empty(value))
return (-1);
propbuf[0] = '\0';
for (pair = nvlist_next_nvpair(value, NULL); pair != NULL;
pair = nvlist_next_nvpair(value, pair)) {
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) strlcat(propbuf, nvpair_name(pair), proplen);
}
return (0);
}
struct get_clones_arg {
uint64_t numclones;
nvlist_t *value;
const char *origin;
char buf[ZFS_MAX_DATASET_NAME_LEN];
};
static int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
struct get_clones_arg *gca = arg;
if (gca->numclones == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
NULL, NULL, 0, B_TRUE) != 0)
goto out;
if (strcmp(gca->buf, gca->origin) == 0) {
fnvlist_add_boolean(gca->value, zfs_get_name(zhp));
gca->numclones--;
}
out:
(void) zfs_iter_children_v2(zhp, 0, get_clones_cb, gca);
zfs_close(zhp);
return (0);
}
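/*
 * get_clones_cb() walks every descendant of the filesystem it is handed and
 * records those whose 'origin' matches gca->origin; once gca->numclones
 * reaches zero, the traversal short-circuits without recursing further.
 */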
nvlist_t *
zfs_get_clones_nvl(zfs_handle_t *zhp)
{
nvlist_t *nv, *value;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
struct get_clones_arg gca;
/*
* If this is a snapshot, then the kernel wasn't able
* to get the clones. Do it by slowly iterating.
*/
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
return (NULL);
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
nvlist_free(nv);
return (NULL);
}
gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
gca.value = value;
gca.origin = zhp->zfs_name;
if (gca.numclones != 0) {
zfs_handle_t *root;
char pool[ZFS_MAX_DATASET_NAME_LEN];
char *cp = pool;
/* get the pool name */
(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
(void) strsep(&cp, "/@");
root = zfs_open(zhp->zfs_hdl, pool,
ZFS_TYPE_FILESYSTEM);
if (root == NULL) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
(void) get_clones_cb(root, &gca);
}
if (gca.numclones != 0 ||
nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
nvlist_add_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
nvlist_free(nv);
nvlist_free(value);
nv = fnvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES));
}
return (fnvlist_lookup_nvlist(nv, ZPROP_VALUE));
}
static int
get_rsnaps_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
uint64_t *snaps;
uint_t nsnaps;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &value) != 0)
return (-1);
if (nvlist_lookup_uint64_array(value, ZPROP_VALUE, &snaps,
&nsnaps) != 0)
return (-1);
if (nsnaps == 0) {
/* There are no redaction snapshots; pass a special value back */
(void) snprintf(propbuf, proplen, "none");
return (0);
}
propbuf[0] = '\0';
for (int i = 0; i < nsnaps; i++) {
char buf[128];
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) snprintf(buf, sizeof (buf), "%llu",
(u_longlong_t)snaps[i]);
(void) strlcat(propbuf, buf, proplen);
}
return (0);
}
/*
* Accepts a property and value and checks that the value
* matches the one found by the channel program. If they are
* not equal, print both of them.
*/
static void
zcp_check(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t intval,
const char *strval)
{
if (!zhp->zfs_hdl->libzfs_prop_debug)
return;
int error;
char *poolname = zhp->zpool_hdl->zpool_name;
const char *prop_name = zfs_prop_to_name(prop);
const char *program =
"args = ...\n"
"ds = args['dataset']\n"
"prop = args['property']\n"
"value, setpoint = zfs.get_prop(ds, prop)\n"
"return {value=value, setpoint=setpoint}\n";
nvlist_t *outnvl;
nvlist_t *retnvl;
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string(argnvl, "dataset", zhp->zfs_name);
fnvlist_add_string(argnvl, "property", zfs_prop_to_name(prop));
error = lzc_channel_program_nosync(poolname, program,
10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
if (error == 0) {
retnvl = fnvlist_lookup_nvlist(outnvl, "return");
if (zfs_prop_get_type(prop) == PROP_TYPE_NUMBER) {
int64_t ans;
error = nvlist_lookup_int64(retnvl, "value", &ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (ans != intval) {
(void) fprintf(stderr, "%s: zfs found %llu, "
"but zcp found %llu\n", prop_name,
(u_longlong_t)intval, (u_longlong_t)ans);
}
} else {
const char *str_ans;
error = nvlist_lookup_string(retnvl, "value", &str_ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (strcmp(strval, str_ans) != 0) {
(void) fprintf(stderr,
"%s: zfs found '%s', but zcp found '%s'\n",
prop_name, strval, str_ans);
}
}
} else {
(void) fprintf(stderr, "%s: zcp check failed, channel program "
"error: %u\n", prop_name, error);
}
nvlist_free(argnvl);
nvlist_free(outnvl);
}
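/*
 * zcp_check() is a no-op unless property debugging is enabled on the libzfs
 * handle (libzfs_prop_debug). When enabled, it cross-checks the value libzfs
 * computed against zfs.get_prop() run as a read-only channel program and
 * prints any mismatch to stderr.
 */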
/*
* Retrieve a property from the given object. If 'literal' is specified, then
* numbers are left as exact values. Otherwise, numbers are converted to a
* human-readable form.
*
* Returns 0 on success, or -1 on error.
*/
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
const char *source = NULL;
uint64_t val;
const char *str;
const char *strval;
boolean_t received = zfs_is_recvd_props_mode(zhp);
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (-1);
if (received && zfs_prop_readonly(prop))
return (-1);
if (src)
*src = ZPROP_SRC_NONE;
switch (prop) {
case ZFS_PROP_CREATION:
/*
* 'creation' is a time_t stored in the statistics. We convert
* this into a string unless 'literal' is specified.
*/
{
val = getprop_uint64(zhp, prop, &source);
time_t time = (time_t)val;
struct tm t;
if (literal ||
localtime_r(&time, &t) == NULL ||
strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_MOUNTPOINT:
/*
* Getting the precise mountpoint can be tricky.
*
* - for 'none' or 'legacy', return those values.
* - for inherited mountpoints, we want to take everything
* after our ancestor and append it to the inherited value.
*
* If the pool has an alternate root, we want to prepend that
* root to any values we return.
*/
str = getprop_string(zhp, prop, &source);
if (str[0] == '/') {
char buf[MAXPATHLEN];
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
relpath = zhp->zfs_name + strlen(source);
if (relpath[0] == '/')
relpath++;
}
if ((zpool_get_prop(zhp->zpool_hdl,
ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL,
B_FALSE)) || (strcmp(root, "-") == 0))
root[0] = '\0';
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
if (str[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
str++;
if (relpath[0] == '\0')
(void) snprintf(propbuf, proplen, "%s%s",
root, str);
else
(void) snprintf(propbuf, proplen, "%s%s%s%s",
root, str, relpath[0] == '@' ? "" : "/",
relpath);
} else {
/* 'legacy' or 'none' */
(void) strlcpy(propbuf, str, proplen);
}
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_ORIGIN:
if (*zhp->zfs_dmustats.dds_origin != '\0') {
str = (char *)&zhp->zfs_dmustats.dds_origin;
} else {
str = getprop_string(zhp, prop, &source);
}
if (str == NULL || *str == '\0')
str = zfs_prop_default_string(prop);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case ZFS_PROP_REDACT_SNAPS:
if (get_rsnaps_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_CLONES:
if (get_clones_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If quota or reservation is 0, we translate this into 'none'
* (unless literal is set), and indicate that it's the default
* value. Otherwise, we print the number nicely and indicate
* that it's set locally.
*/
if (val == 0) {
if (literal)
(void) strlcpy(propbuf, "0", proplen);
else
(void) strlcpy(propbuf, "none", proplen);
} else {
if (literal)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
else
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If limit is UINT64_MAX, we translate this into 'none', and
* indicate that it's the default value. Otherwise, we print
* the number nicely and indicate that it's set locally.
*/
if (val == UINT64_MAX) {
(void) strlcpy(propbuf, "none", proplen);
} else if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFRATIO:
case ZFS_PROP_COMPRESSRATIO:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal)
(void) snprintf(propbuf, proplen, "%llu.%02llu",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
else
(void) snprintf(propbuf, proplen, "%llu.%02llux",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_TYPE:
switch (zhp->zfs_type) {
case ZFS_TYPE_FILESYSTEM:
str = "filesystem";
break;
case ZFS_TYPE_VOLUME:
str = "volume";
break;
case ZFS_TYPE_SNAPSHOT:
str = "snapshot";
break;
case ZFS_TYPE_BOOKMARK:
str = "bookmark";
break;
default:
abort();
}
(void) snprintf(propbuf, proplen, "%s", str);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MOUNTED:
/*
* The 'mounted' property is a pseudo-property that describes
* whether the filesystem is currently mounted. Even though
* it's a boolean value, the typical values of "on" and "off"
* don't make sense, so we translate to "yes" and "no".
*/
if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
src, &source, &val) != 0)
return (-1);
if (val)
(void) strlcpy(propbuf, "yes", proplen);
else
(void) strlcpy(propbuf, "no", proplen);
break;
case ZFS_PROP_NAME:
/*
* The 'name' property is a pseudo-property derived from the
* dataset name. It is presented as a real property to simplify
* consumers.
*/
(void) strlcpy(propbuf, zhp->zfs_name, proplen);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
m_label_t *new_sl = NULL;
char *ascii = NULL; /* human readable label */
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
if (literal || (strcasecmp(propbuf,
ZFS_MLSLABEL_DEFAULT) == 0))
break;
/*
* Try to translate the internal hex string to
* human-readable output. If there are any
* problems just use the hex string.
*/
if (str_to_label(propbuf, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1) {
m_label_free(new_sl);
break;
}
if (label_to_str(new_sl, &ascii, M_LABEL,
DEF_NAMES) != 0) {
if (ascii)
free(ascii);
m_label_free(new_sl);
break;
}
m_label_free(new_sl);
(void) strlcpy(propbuf, ascii, proplen);
free(ascii);
#else
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
#endif /* HAVE_MLSLABEL */
}
break;
case ZFS_PROP_GUID:
case ZFS_PROP_KEY_GUID:
case ZFS_PROP_IVSET_GUID:
case ZFS_PROP_CREATETXG:
case ZFS_PROP_OBJSETID:
case ZFS_PROP_PBKDF2_ITERS:
/*
* These properties are stored as numbers, but they are
* identifiers or counters.
* We don't want them to be pretty printed, because pretty
* printing truncates their values, making them useless.
*/
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFERENCED:
case ZFS_PROP_AVAILABLE:
case ZFS_PROP_USED:
case ZFS_PROP_USEDSNAP:
case ZFS_PROP_USEDDS:
case ZFS_PROP_USEDREFRESERV:
case ZFS_PROP_USEDCHILD:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_SNAPSHOTS_CHANGED:
{
if ((get_numeric_property(zhp, prop, src, &source,
&val) != 0) || val == 0) {
return (-1);
}
time_t time = (time_t)val;
struct tm t;
if (literal ||
localtime_r(&time, &t) == NULL ||
strftime(propbuf, proplen, "%a %b %e %k:%M:%S %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
}
zcp_check(zhp, prop, val, NULL);
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0) {
return (-1);
}
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case PROP_TYPE_STRING:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case PROP_TYPE_INDEX:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0)
return (-1);
if (zfs_prop_index_to_string(prop, val, &strval) != 0)
return (-1);
(void) strlcpy(propbuf, strval, proplen);
zcp_check(zhp, prop, 0, strval);
break;
default:
abort();
}
}
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
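/*
 * Illustrative usage of zfs_prop_get() (not part of this change):
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	char setpoint[ZFS_MAX_DATASET_NAME_LEN];
 *	zprop_source_t src;
 *	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, sizeof (buf),
 *	    &src, setpoint, sizeof (setpoint), B_FALSE) == 0)
 *		(void) printf("mountpoint=%s\n", buf);
 */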
/*
* Utility function to get the given numeric property. Does no validation that
* the given property is the appropriate type; should only be used with
* hard-coded property types.
*/
uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
const char *source;
uint64_t val = 0;
(void) get_numeric_property(zhp, prop, NULL, &source, &val);
return (val);
}
static int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
char buf[64];
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)val);
return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
}
/*
* Similar to zfs_prop_get(), but returns the value as an integer.
*/
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
zprop_source_t *src, char *statbuf, size_t statlen)
{
const char *source;
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) {
return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
zfs_prop_to_name(prop)));
}
if (src)
*src = ZPROP_SRC_NONE;
if (get_numeric_property(zhp, prop, src, &source, value) != 0)
return (-1);
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
#ifdef HAVE_IDMAP
static int
idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
char **domainp, idmap_rid_t *ridp)
{
idmap_get_handle_t *get_hdl = NULL;
idmap_stat status;
int err = EINVAL;
if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
goto out;
if (isuser) {
err = idmap_get_sidbyuid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
} else {
err = idmap_get_sidbygid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
}
if (err == IDMAP_SUCCESS &&
idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
status == IDMAP_SUCCESS)
err = 0;
else
err = EINVAL;
out:
if (get_hdl)
idmap_get_destroy(get_hdl);
return (err);
}
#endif /* HAVE_IDMAP */
/*
* Convert the propname into the parameters needed by the kernel.
* Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
* Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
* Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234
* Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234
* Eg: projectquota@123 -> ZFS_PROP_PROJECTQUOTA, "", 123
* Eg: projectused@789 -> ZFS_PROP_PROJECTUSED, "", 789
*/
static int
userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
{
zfs_userquota_prop_t type;
char *cp;
boolean_t isuser;
boolean_t isgroup;
boolean_t isproject;
struct passwd gpwd, *pw;
struct group ggrp, *gr;
char rpbuf[2048];	/* buffer for getpwnam_r()/getgrnam_r(); size is a nominal choice */
domain[0] = '\0';
/* Figure out the property type ({user|group|project}{quota|space}) */
for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
if (strncmp(propname, zfs_userquota_prop_prefixes[type],
strlen(zfs_userquota_prop_prefixes[type])) == 0)
break;
}
if (type == ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
*typep = type;
isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_USEROBJUSED);
isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_GROUPOBJUSED);
isproject = (type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED);
cp = strchr(propname, '@') + 1;
if (isuser &&
getpwnam_r(cp, &gpwd, rpbuf, sizeof (rpbuf), &pw) == 0 &&
pw != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = pw->pw_uid;
} else if (isgroup &&
getgrnam_r(cp, &ggrp, rpbuf, sizeof (rpbuf), &gr) == 0 &&
gr != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = gr->gr_gid;
} else if (!isproject && strchr(cp, '@')) {
#ifdef HAVE_IDMAP
/*
* It's a SID name (eg "user@domain") that needs to be
* turned into S-1-domainID-RID.
*/
directory_error_t e;
char *numericsid = NULL;
char *end;
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
if (isuser) {
e = directory_sid_from_user_name(NULL,
cp, &numericsid);
} else {
e = directory_sid_from_group_name(NULL,
cp, &numericsid);
}
if (e != NULL) {
directory_error_free(e);
return (ENOENT);
}
if (numericsid == NULL)
return (ENOENT);
cp = numericsid;
(void) strlcpy(domain, cp, domainlen);
cp = strrchr(domain, '-');
*cp = '\0';
cp++;
errno = 0;
*ridp = strtoull(cp, &end, 10);
free(numericsid);
if (errno != 0 || *end != '\0')
return (EINVAL);
#else
(void) domainlen;
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
/* It's a user/group/project ID (eg "12345"). */
uid_t id;
char *end;
id = strtoul(cp, &end, 10);
if (*end != '\0')
return (EINVAL);
if (id > MAXUID && !isproject) {
#ifdef HAVE_IDMAP
/* It's an ephemeral ID. */
idmap_rid_t rid;
char *mapdomain;
if (idmap_id_to_numeric_domain_rid(id, isuser,
&mapdomain, &rid) != 0)
return (ENOENT);
(void) strlcpy(domain, mapdomain, domainlen);
*ridp = rid;
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
*ridp = id;
}
}
return (0);
}
static int
zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue, zfs_userquota_prop_t *typep)
{
int err;
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
err = userquota_propname_decode(propname,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
zc.zc_objset_type = *typep;
if (err)
return (err);
err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_USERSPACE_ONE, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
zfs_userquota_prop_t type;
return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
&type));
}
int
zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
zfs_userquota_prop_t type;
err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
&type);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else if (propvalue == 0 &&
(type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTOBJQUOTA)) {
(void) strlcpy(propbuf, "none", proplen);
} else if (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USERUSED || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(propvalue, propbuf, proplen);
} else {
zfs_nicenum(propvalue, propbuf, proplen);
}
return (0);
}
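/*
 * Illustrative usage of zfs_prop_get_userquota() (not part of this change;
 * the uid is made up):
 *
 *	char buf[32];
 *	if (zfs_prop_get_userquota(zhp, "userused@1000", buf,
 *	    sizeof (buf), B_FALSE) == 0)
 *		(void) printf("userused@1000 = %s\n", buf);
 */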
/*
* propname must start with "written@" or "written#".
*/
int
zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
int err;
zfs_cmd_t zc = {"\0"};
const char *snapname;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
assert(zfs_prop_written(propname));
snapname = propname + strlen("written@");
if (strchr(snapname, '@') != NULL || strchr(snapname, '#') != NULL) {
/* full snapshot or bookmark name specified */
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
} else {
/* snapname is the short name, append it to zhp's fsname */
char *cp;
(void) strlcpy(zc.zc_value, zhp->zfs_name,
sizeof (zc.zc_value));
cp = strchr(zc.zc_value, '@');
if (cp != NULL)
*cp = '\0';
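/*
 * snapname - 1 points at the '@' or '#' delimiter in propname, so
 * the delimiter is appended along with the short name.
 */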
(void) strlcat(zc.zc_value, snapname - 1, sizeof (zc.zc_value));
}
err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SPACE_WRITTEN, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
err = zfs_prop_get_written_int(zhp, propname, &propvalue);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else {
zfs_nicebytes(propvalue, propbuf, proplen);
}
return (0);
}
/*
* Returns the name of the given zfs handle.
*/
const char *
zfs_get_name(const zfs_handle_t *zhp)
{
return (zhp->zfs_name);
}
/*
* Returns the name of the parent pool for the given zfs handle.
*/
const char *
zfs_get_pool_name(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl->zpool_name);
}
/*
* Returns the type of the given zfs handle.
*/
zfs_type_t
zfs_get_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_type);
}
/*
* Returns the type of the given zfs handle,
* or, if a snapshot, the type of the snapshotted dataset.
*/
zfs_type_t
zfs_get_underlying_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_head_type);
}
/*
* Is one dataset name a child dataset of another?
*
* Needs to handle these cases:
* Dataset 1 "a/foo" "a/foo" "a/foo" "a/foo"
* Dataset 2 "a/fo" "a/foobar" "a/bar/baz" "a/foo/bar"
* Descendant? No. No. No. Yes.
*/
static boolean_t
is_descendant(const char *ds1, const char *ds2)
{
size_t d1len = strlen(ds1);
/* ds2 can't be a descendant if it's smaller */
if (strlen(ds2) < d1len)
return (B_FALSE);
/* otherwise, compare strings and verify that there's a '/' char */
return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
}
/*
* Given a complete name, return just the portion that refers to the parent.
* Will return -1 if there is no parent (path is just the name of the
* pool).
*/
static int
parent_name(const char *path, char *buf, size_t buflen)
{
char *slashp;
(void) strlcpy(buf, path, buflen);
if ((slashp = strrchr(buf, '/')) == NULL)
return (-1);
*slashp = '\0';
return (0);
}
int
zfs_parent_name(zfs_handle_t *zhp, char *buf, size_t buflen)
{
return (parent_name(zfs_get_name(zhp), buf, buflen));
}
/*
* If accept_ancestor is false, then check to make sure that the given path has
* a parent, and that it exists. If accept_ancestor is true, then find the
* closest existing ancestor for the given path. In prefixlen return the
* length of already existing prefix of the given path. We also fetch the
* 'zoned' property, which is used to validate property settings when creating
* new datasets.
*/
static int
check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
boolean_t accept_ancestor, int *prefixlen)
{
zfs_cmd_t zc = {"\0"};
char parent[ZFS_MAX_DATASET_NAME_LEN];
char *slash;
zfs_handle_t *zhp;
char errbuf[ERRBUFLEN];
uint64_t is_zoned;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);
/* get parent, and check to see if this is just a pool */
if (parent_name(path, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* check to see if the pool exists */
if ((slash = strchr(parent, '/')) == NULL)
slash = parent + strlen(parent);
(void) strlcpy(zc.zc_name, parent,
MIN(sizeof (zc.zc_name), slash - parent + 1));
if (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* check to see if the parent dataset exists */
while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
if (errno == ENOENT && accept_ancestor) {
/*
* Go deeper to find an ancestor, give up on top level.
*/
if (parent_name(parent, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
} else if (errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent does not exist"));
return (zfs_error(hdl, EZFS_NOENT, errbuf));
} else
return (zfs_standard_error(hdl, errno, errbuf));
}
is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned != NULL)
*zoned = is_zoned;
/* we are in a non-global zone, but parent is in the global zone */
if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
(void) zfs_standard_error(hdl, EPERM, errbuf);
zfs_close(zhp);
return (-1);
}
/* make sure parent is a filesystem */
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent is not a filesystem"));
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (-1);
}
zfs_close(zhp);
if (prefixlen != NULL)
*prefixlen = strlen(parent);
return (0);
}
/*
* Finds whether the dataset of the given type(s) exists.
*/
boolean_t
zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
{
zfs_handle_t *zhp;
if (!zfs_validate_name(hdl, path, types, B_FALSE))
return (B_FALSE);
/*
* Try to get stats for the dataset, which will tell us if it exists.
*/
if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
int ds_type = zhp->zfs_type;
zfs_close(zhp);
if (types & ds_type)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Given a path to 'target', create all the ancestors between
* the prefixlen portion of the path, and the target itself.
* Fail if the initial prefixlen-ancestor does not already exist.
*/
int
create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
{
zfs_handle_t *h;
char *cp;
const char *opname;
/* make sure prefix exists */
cp = target + prefixlen;
if (*cp != '/') {
assert(strchr(cp, '/') == NULL);
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
} else {
*cp = '\0';
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
*cp = '/';
}
if (h == NULL)
return (-1);
zfs_close(h);
/*
* Attempt to create, mount, and share any ancestor filesystems,
* up to the prefixlen-long one.
*/
for (cp = target + prefixlen + 1;
(cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) {
*cp = '\0';
h = make_dataset_handle(hdl, target);
if (h) {
/* it already exists, nothing to do here */
zfs_close(h);
continue;
}
if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
NULL) != 0) {
opname = dgettext(TEXT_DOMAIN, "create");
goto ancestorerr;
}
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
if (h == NULL) {
opname = dgettext(TEXT_DOMAIN, "open");
goto ancestorerr;
}
if (zfs_mount(h, NULL, 0) != 0) {
opname = dgettext(TEXT_DOMAIN, "mount");
goto ancestorerr;
}
if (zfs_share(h, NULL) != 0) {
opname = dgettext(TEXT_DOMAIN, "share");
goto ancestorerr;
}
zfs_close(h);
}
zfs_commit_shares(NULL);
return (0);
ancestorerr:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to %s ancestor '%s'"), opname, target);
return (-1);
}
/*
* Creates non-existing ancestors of the given path.
*/
int
zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
{
int prefix;
char *path_copy;
char errbuf[ERRBUFLEN];
int rc = 0;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/*
* Check that we are not passing the nesting limit
* before we start creating any ancestors.
*/
if (dataset_nestcheck(path) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"maximum name nesting depth exceeded"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
return (-1);
if ((path_copy = strdup(path)) != NULL) {
rc = create_parents(hdl, path_copy, prefix);
free(path_copy);
}
if (path_copy == NULL || rc != 0)
return (-1);
return (0);
}
/*
* Create a new filesystem or volume.
*/
int
zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
nvlist_t *props)
{
int ret;
uint64_t size = 0;
uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
uint64_t zoned;
enum lzc_dataset_type ost;
zpool_handle_t *zpool_handle;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[ERRBUFLEN];
char parent[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/* validate the path, taking care to note the extended error message */
if (!zfs_validate_name(hdl, path, type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if (dataset_nestcheck(path) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"maximum name nesting depth exceeded"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* validate parents exist */
if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
return (-1);
/*
* The failure modes when creating a dataset of a different type over
* one that already exists are a little strange. In particular, if you
* try to create a dataset on top of an existing dataset, the ioctl()
* will return ENOENT, not EEXIST. To prevent this from happening, we
* first try to see if the dataset exists.
*/
if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
}
if (type == ZFS_TYPE_VOLUME)
ost = LZC_DATSET_TYPE_ZVOL;
else
ost = LZC_DATSET_TYPE_ZFS;
/* open zpool handle for prop validation */
char pool_path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(pool_path, path, sizeof (pool_path));
/* truncate pool_path at first slash */
char *p = strchr(pool_path, '/');
if (p != NULL)
*p = '\0';
if ((zpool_handle = zpool_open(hdl, pool_path)) == NULL)
return (-1);
if (props && (props = zfs_valid_proplist(hdl, type, props,
zoned, NULL, zpool_handle, B_TRUE, errbuf)) == 0) {
zpool_close(zpool_handle);
return (-1);
}
zpool_close(zpool_handle);
if (type == ZFS_TYPE_VOLUME) {
/*
* If we are creating a volume, the size and block size must
* satisfy a few constraints. First, the blocksize must be a
* valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the
* volsize must be a multiple of the block size, and cannot be
* zero.
*/
if (props == NULL || nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if ((ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&blocksize)) != 0) {
if (ret == ENOENT) {
blocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
} else {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume block size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
if (size == 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size cannot be zero"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (size % blocksize != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size must be a multiple of volume block "
"size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
(void) parent_name(path, parent, sizeof (parent));
if (zfs_crypto_create(hdl, parent, props, NULL, B_TRUE,
&wkeydata, &wkeylen) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
/* create the dataset */
ret = lzc_create(path, ost, props, wkeydata, wkeylen);
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
/* check for failure */
if (ret != 0) {
switch (errno) {
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to set this "
"property or value"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption root's key is not loaded "
"or provided"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ERANGE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property value(s) specified"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
#ifdef _ILP32
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
if (type == ZFS_TYPE_VOLUME)
return (zfs_error(hdl, EZFS_VOLTOOBIG,
errbuf));
zfs_fallthrough;
#endif
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (0);
}
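/*
 * Illustrative usage of zfs_create() (not part of this change; names and
 * sizes are made up): create a 1 GiB volume with an 8 KiB block size.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
 *	    1ULL << 30);
 *	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
 *	    8192);
 *	int err = zfs_create(hdl, "tank/vol0", ZFS_TYPE_VOLUME, props);
 *	fnvlist_free(props);
 */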
/*
* Destroys the given dataset. The caller must make sure that the filesystem
* isn't mounted, and that there are no active dependents. If the file system
* does not exist, this function does nothing.
*/
int
zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
{
int error;
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT && defer)
return (EINVAL);
if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
error = lzc_destroy_bookmarks(nv, NULL);
fnvlist_free(nv);
if (error != 0) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, error,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
return (0);
}
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
error = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
error = lzc_destroy(zhp->zfs_name);
}
if (error != 0 && error != ENOENT) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
remove_mountpoint(zhp);
return (0);
}
struct destroydata {
nvlist_t *nvl;
const char *snapname;
};
static int
zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
{
struct destroydata *dd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
dd->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
fnvlist_add_boolean(dd->nvl, name);
rv = zfs_iter_filesystems_v2(zhp, 0, zfs_check_snap_cb, dd);
zfs_close(zhp);
return (rv);
}
/*
* Destroys all snapshots with the given name in zhp & descendants.
*/
int
zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
{
int ret;
struct destroydata dd = { 0 };
dd.snapname = snapname;
dd.nvl = fnvlist_alloc();
(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);
if (nvlist_empty(dd.nvl)) {
ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
zhp->zfs_name, snapname);
} else {
ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer);
}
fnvlist_free(dd.nvl);
return (ret);
}
/*
* Destroys all the snapshots named in the nvlist.
*/
int
zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer)
{
nvlist_t *errlist = NULL;
nvpair_t *pair;
int ret = zfs_destroy_snaps_nvl_os(hdl, snaps);
if (ret != 0)
return (ret);
ret = lzc_destroy_snaps(snaps, defer, &errlist);
if (ret == 0) {
nvlist_free(errlist);
return (0);
}
if (nvlist_empty(errlist)) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshots"));
ret = zfs_standard_error(hdl, ret, errbuf);
}
for (pair = nvlist_next_nvpair(errlist, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"),
nvpair_name(pair));
switch (fnvpair_value_int32(pair)) {
case EEXIST:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "snapshot is cloned"));
ret = zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
ret = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
nvlist_free(errlist);
return (ret);
}
/*
* Clones the given dataset. The target must be of the same type as the source.
*/
int
zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
{
char parent[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t zoned;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), target);
/* validate the target/clone name */
if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
/* do the clone */
if (props) {
zfs_type_t type = ZFS_TYPE_FILESYSTEM;
if (ZFS_IS_VOLUME(zhp))
type = ZFS_TYPE_VOLUME;
if ((props = zfs_valid_proplist(hdl, type, props, zoned,
zhp, zhp->zpool_hdl, B_TRUE, errbuf)) == NULL)
return (-1);
if (zfs_fix_auto_resv(zhp, props) == -1) {
nvlist_free(props);
return (-1);
}
}
if (zfs_crypto_clone_check(hdl, zhp, parent, props) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
ret = lzc_clone(target, zhp->zfs_name, props);
nvlist_free(props);
if (ret != 0) {
switch (errno) {
case ENOENT:
/*
* The parent doesn't exist. We should have caught this
* above, but there may be a race condition that has since
* destroyed the parent.
*
* At this point, we don't know whether it's the source
* that doesn't exist anymore, or whether the target
* dataset doesn't exist.
*/
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
case EXDEV:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"source and target pools differ"));
return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
errbuf));
default:
return (zfs_standard_error(zhp->zfs_hdl, errno,
errbuf));
}
}
return (ret);
}
/*
* Promotes the given clone fs to be the clone parent.
*/
int
zfs_promote(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot promote '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots can not be promoted"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (zhp->zfs_dmustats.dds_origin[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a cloned filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname));
if (ret != 0) {
switch (ret) {
case EACCES:
/*
* Promoting encrypted dataset outside its
* encryption root.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot promote dataset outside its "
"encryption root"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
case EEXIST:
/* There is a conflicting snapshot name. */
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"conflicting snapshot '%s' from parent '%s'"),
snapname, zhp->zfs_dmustats.dds_origin);
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
default:
return (zfs_standard_error(hdl, ret, errbuf));
}
}
return (ret);
}
typedef struct snapdata {
nvlist_t *sd_nvl;
const char *sd_snapname;
} snapdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snapdata_t *sd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) {
if (snprintf(name, sizeof (name), "%s@%s", zfs_get_name(zhp),
sd->sd_snapname) >= sizeof (name))
return (EINVAL);
fnvlist_add_boolean(sd->sd_nvl, name);
rv = zfs_iter_filesystems_v2(zhp, 0, zfs_snapshot_cb, sd);
}
zfs_close(zhp);
return (rv);
}
/*
* Creates snapshots. The keys in the snaps nvlist are the snapshots to be
* created.
*/
int
zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props)
{
int ret;
char errbuf[ERRBUFLEN];
nvpair_t *elem;
nvlist_t *errors;
zpool_handle_t *zpool_hdl;
char pool[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshots "));
elem = NULL;
while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) {
const char *snapname = nvpair_name(elem);
/* validate the target name */
if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT,
B_TRUE)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), snapname);
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
/*
* get pool handle for prop validation. assumes all snaps are in the
* same pool, as does lzc_snapshot (below).
*/
elem = nvlist_next_nvpair(snaps, NULL);
if (elem == NULL)
return (-1);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
zpool_hdl = zpool_open(hdl, pool);
if (zpool_hdl == NULL)
return (-1);
if (props != NULL &&
(props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
props, B_FALSE, NULL, zpool_hdl, B_FALSE, errbuf)) == NULL) {
zpool_close(zpool_hdl);
return (-1);
}
zpool_close(zpool_hdl);
ret = lzc_snapshot(snaps, props, &errors);
if (ret != 0) {
boolean_t printed = B_FALSE;
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), nvpair_name(elem));
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
printed = B_TRUE;
}
if (!printed) {
switch (ret) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple snapshots of same "
"fs not allowed"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
}
nvlist_free(props);
nvlist_free(errors);
return (ret);
}
int
zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
nvlist_t *props)
{
int ret;
snapdata_t sd = { 0 };
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
zfs_handle_t *zhp;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot snapshot %s"), path);
if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
(void) strlcpy(fsname, path, sizeof (fsname));
cp = strchr(fsname, '@');
*cp = '\0';
sd.sd_snapname = cp + 1;
if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
return (-1);
}
sd.sd_nvl = fnvlist_alloc();
if (recursive) {
(void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd);
} else {
fnvlist_add_boolean(sd.sd_nvl, path);
}
ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props);
fnvlist_free(sd.sd_nvl);
zfs_close(zhp);
return (ret);
}
/*
* Destroy any snapshots and bookmarks more recent than the rollback target.
* For each such snapshot, rollback_destroy_dependent() is invoked on its
* dependents (clones) first, unmounting and destroying them, and then the
* snapshot itself is destroyed.
*/
typedef struct rollback_data {
const char *cb_target; /* the snapshot */
uint64_t cb_create; /* creation time reference */
boolean_t cb_error;
boolean_t cb_force;
} rollback_data_t;
static int
rollback_destroy_dependent(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
prop_changelist_t *clp;
/* We must destroy this clone; first unmount it */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
cbp->cb_force ? MS_FORCE: 0);
if (clp == NULL || changelist_prefix(clp) != 0) {
cbp->cb_error = B_TRUE;
zfs_close(zhp);
return (0);
}
if (zfs_destroy(zhp, B_FALSE) != 0)
cbp->cb_error = B_TRUE;
else
changelist_remove(clp, zhp->zfs_name);
(void) changelist_postfix(clp);
changelist_free(clp);
zfs_close(zhp);
return (0);
}
static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
cbp->cb_error |= zfs_iter_dependents_v2(zhp, 0, B_FALSE,
rollback_destroy_dependent, cbp);
cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
}
zfs_close(zhp);
return (0);
}
/*
* Given a dataset, roll back to a specific snapshot, discarding any
* data changes since then and making it the active dataset.
*
* Any snapshots and bookmarks more recent than the target are
* destroyed, along with their dependents (i.e. clones).
*/
int
zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
rollback_data_t cb = { 0 };
int err;
boolean_t restore_resv = 0;
uint64_t old_volsize = 0, new_volsize;
zfs_prop_t resv_prop = { 0 };
uint64_t min_txg = 0;
assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
zhp->zfs_type == ZFS_TYPE_VOLUME);
/*
* Destroy all recent snapshots and their dependents.
*/
cb.cb_force = force;
cb.cb_target = snap->zfs_name;
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
if (cb.cb_create > 0)
min_txg = cb.cb_create;
(void) zfs_iter_snapshots_v2(zhp, 0, rollback_destroy, &cb,
min_txg, 0);
(void) zfs_iter_bookmarks_v2(zhp, 0, rollback_destroy, &cb);
if (cb.cb_error)
return (-1);
/*
* Now that we have verified that the snapshot is the latest,
* rollback to the given snapshot.
*/
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
restore_resv =
(old_volsize == zfs_prop_get_int(zhp, resv_prop));
}
/*
* Pass both the filesystem and the wanted snapshot names so that
* we get an error back if the snapshot is destroyed or a new
* snapshot is created before this request is processed.
*/
err = lzc_rollback_to(zhp->zfs_name, snap->zfs_name);
if (err != 0) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
zhp->zfs_name);
switch (err) {
case EEXIST:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"there is a snapshot or bookmark more recent "
"than '%s'"), snap->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf);
break;
case ESRCH:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not found among snapshots of '%s'"),
snap->zfs_name, zhp->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf);
break;
case EINVAL:
(void) zfs_error(zhp->zfs_hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(zhp->zfs_hdl, err, errbuf);
}
return (err);
}
/*
* For volumes, if the pre-rollback volsize matched the pre-
* rollback reservation and the volsize has changed then set
* the reservation property to the post-rollback volsize.
* Make a new handle since the rollback closed the dataset.
*/
if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
(zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
if (restore_resv) {
new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (old_volsize != new_volsize)
err = zfs_prop_set_int(zhp, resv_prop,
new_volsize);
}
zfs_close(zhp);
}
return (err);
}
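For context, zfs_rollback() above is part of the public libzfs API. The following is an illustrative sketch only (not part of this change) of a minimal caller: the dataset and snapshot names are invented, and error handling is pared down to the bare call sequence.

/*
 * Illustration: roll the hypothetical dataset "tank/data" back to
 * "tank/data@known-good" without forcing unmounts, using the public
 * libzfs entry points.
 */
#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	if (hdl == NULL)
		return (1);

	zfs_handle_t *fs = zfs_open(hdl, "tank/data",
	    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
	zfs_handle_t *snap = zfs_open(hdl, "tank/data@known-good",
	    ZFS_TYPE_SNAPSHOT);
	int err = -1;

	if (fs != NULL && snap != NULL)
		err = zfs_rollback(fs, snap, B_FALSE);

	if (snap != NULL)
		zfs_close(snap);
	if (fs != NULL)
		zfs_close(fs);
	libzfs_fini(hdl);
	return (err == 0 ? 0 : 1);
}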
/*
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target, renameflags_t flags)
{
int ret = 0;
zfs_cmd_t zc = {"\0"};
char *delim;
prop_changelist_t *cl = NULL;
char parent[ZFS_MAX_DATASET_NAME_LEN];
char property[ZFS_MAXPROPLEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[ERRBUFLEN];
/* if we have the same exact name, just return success */
if (strcmp(zhp->zfs_name, target) == 0)
return (0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), target);
/* make sure source name is valid */
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/*
* Make sure the target name is valid
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
if ((strchr(target, '@') == NULL) ||
*target == '@') {
/*
* Snapshot target name is abbreviated,
* reconstruct full dataset name
*/
(void) strlcpy(parent, zhp->zfs_name,
sizeof (parent));
delim = strchr(parent, '@');
if (strchr(target, '@') == NULL)
*(++delim) = '\0';
else
*delim = '\0';
(void) strlcat(parent, target, sizeof (parent));
target = parent;
} else {
/*
* Make sure we're renaming within the same dataset.
*/
delim = strchr(target, '@');
if (strncmp(zhp->zfs_name, target, delim - target)
!= 0 || zhp->zfs_name[delim - target] != '@') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots must be part of same "
"dataset"));
return (zfs_error(hdl, EZFS_CROSSTARGET,
errbuf));
}
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (flags.recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents */
if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
return (-1);
/* make sure we're in the same pool */
verify((delim = strchr(target, '/')) != NULL);
if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
zhp->zfs_name[delim - target] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"datasets must be within same pool"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
/* new name cannot be a child of the current dataset name */
if (is_descendant(zhp->zfs_name, target)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"New dataset name cannot be a descendant of "
"current dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
if (getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Avoid unmounting file systems with mountpoint property set to
* 'legacy' or 'none' even if the -u option is not given.
*/
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
!flags.recursive && !flags.nounmount &&
zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
(strcmp(property, "legacy") == 0 ||
strcmp(property, "none") == 0)) {
flags.nounmount = B_TRUE;
}
if (flags.recursive) {
char *parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
delim = strchr(parentname, '@');
*delim = '\0';
zfs_handle_t *zhrp = zfs_open(zhp->zfs_hdl, parentname,
ZFS_TYPE_DATASET);
free(parentname);
if (zhrp == NULL) {
ret = -1;
goto error;
}
zfs_close(zhrp);
} else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME,
flags.nounmount ? CL_GATHER_DONT_UNMOUNT :
CL_GATHER_ITER_MOUNTED,
flags.forceunmount ? MS_FORCE : 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
ret = -1;
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
zc.zc_objset_type = DMU_OST_ZFS;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = !!flags.recursive;
zc.zc_cookie |= (!!flags.nounmount) << 1;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename '%s'"), zc.zc_name);
if (flags.recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
} else if (errno == EACCES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot move encrypted child outside of "
"its encryption root"));
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
if (cl != NULL)
(void) changelist_postfix(cl);
} else {
if (cl != NULL) {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
+ (void) strlcpy(zhp->zfs_name, target, sizeof (zhp->zfs_name));
}
error:
if (cl != NULL) {
changelist_free(cl);
}
return (ret);
}
nvlist_t *
zfs_get_all_props(zfs_handle_t *zhp)
{
return (zhp->zfs_props);
}
nvlist_t *
zfs_get_recvd_props(zfs_handle_t *zhp)
{
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (NULL);
return (zhp->zfs_recvd_props);
}
nvlist_t *
zfs_get_user_props(zfs_handle_t *zhp)
{
return (zhp->zfs_user_props);
}
/*
* This function is used by 'zfs list' to determine the exact set of columns to
* display, and their maximum widths. This does two main things:
*
* - If this is a list of all properties, then expand the list to include
* all native properties, and set a flag so that for each dataset we look
* for new unique user properties and add them to the list.
*
* - For non fixed-width properties, keep track of the maximum width seen
* so that we can size the column appropriately. If the user has
* requested received property values, we also need to compute the width
* of the RECEIVED column.
*/
int
zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received,
boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zprop_list_t *entry;
zprop_list_t **last, **start;
nvlist_t *userprops, *propval;
nvpair_t *elem;
const char *strval;
char buf[ZFS_MAXPROPLEN];
if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
return (-1);
userprops = zfs_get_user_props(zhp);
entry = *plp;
if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
/*
* Go through and add any user properties as necessary. We
* start by incrementing our list pointer to the first
* non-native property.
*/
start = plp;
while (*start != NULL) {
if ((*start)->pl_prop == ZPROP_USERPROP)
break;
start = &(*start)->pl_next;
}
elem = NULL;
while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
/*
* See if we've already found this property in our list.
*/
for (last = start; *last != NULL;
last = &(*last)->pl_next) {
if (strcmp((*last)->pl_user_prop,
nvpair_name(elem)) == 0)
break;
}
if (*last == NULL) {
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_user_prop =
zfs_strdup(hdl, nvpair_name(elem));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_width = strlen(nvpair_name(elem));
entry->pl_all = B_TRUE;
*last = entry;
}
}
}
/*
* Now go through and check the width of any non-fixed columns
*/
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_USERPROP) {
if (zfs_prop_get(zhp, entry->pl_prop,
buf, sizeof (buf), NULL, NULL, 0, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (received && zfs_prop_get_recvd(zhp,
zfs_prop_to_name(entry->pl_prop),
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
} else {
if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop,
&propval) == 0) {
strval = fnvlist_lookup_string(propval,
ZPROP_VALUE);
if (strlen(strval) > entry->pl_width)
entry->pl_width = strlen(strval);
}
if (received && zfs_prop_get_recvd(zhp,
entry->pl_user_prop,
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
}
}
return (0);
}
void
zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
{
nvpair_t *curr;
nvpair_t *next;
/*
* Keep a reference to the props-table against which we prune the
* properties.
*/
zhp->zfs_props_table = props;
curr = nvlist_next_nvpair(zhp->zfs_props, NULL);
while (curr) {
zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
next = nvlist_next_nvpair(zhp->zfs_props, curr);
/*
* User properties will result in ZPROP_USERPROP (an alias for
* ZPROP_INVAL), and since we only know how to prune standard ZFS
* properties, we always leave these in the list. This can also
* happen if we encounter an unknown DSL property (when running
* older software, for example).
*/
if (zfs_prop != ZPROP_USERPROP && props[zfs_prop] == B_FALSE)
(void) nvlist_remove(zhp->zfs_props,
nvpair_name(curr), nvpair_type(curr));
curr = next;
}
}
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *nvlist = NULL;
int error;
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
zc.zc_cookie = (uint64_t)cmd;
if (cmd == ZFS_SMB_ACL_RENAME) {
if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (0);
}
}
switch (cmd) {
case ZFS_SMB_ACL_ADD:
case ZFS_SMB_ACL_REMOVE:
(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
break;
case ZFS_SMB_ACL_RENAME:
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
resource1) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
resource2) != 0) {
(void) no_memory(hdl);
return (-1);
}
zcmd_write_src_nvlist(hdl, &zc, nvlist);
break;
case ZFS_SMB_ACL_PURGE:
break;
default:
return (-1);
}
error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
nvlist_free(nvlist);
return (error);
}
int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
resource, NULL));
}
int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
resource, NULL));
}
int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
NULL, NULL));
}
int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
char *oldname, char *newname)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
oldname, newname));
}
int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
zfs_userspace_cb_t func, void *arg)
{
zfs_cmd_t zc = {"\0"};
zfs_useracct_t buf[100];
libzfs_handle_t *hdl = zhp->zfs_hdl;
int ret;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_objset_type = type;
zc.zc_nvlist_dst = (uintptr_t)buf;
for (;;) {
zfs_useracct_t *zua = buf;
zc.zc_nvlist_dst_size = sizeof (buf);
if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
if ((errno == ENOTSUP &&
(type == ZFS_PROP_USEROBJUSED ||
type == ZFS_PROP_GROUPOBJUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED ||
type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTUSED ||
type == ZFS_PROP_PROJECTQUOTA)))
break;
return (zfs_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get used/quota for %s"), zc.zc_name));
}
if (zc.zc_nvlist_dst_size == 0)
break;
while (zc.zc_nvlist_dst_size > 0) {
if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
zua->zu_space)) != 0)
return (ret);
zua++;
zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
}
}
return (0);
}
struct holdarg {
nvlist_t *nvl;
const char *snapname;
const char *tag;
boolean_t recursive;
int error;
};
static int
zfs_hold_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
fnvlist_add_string(ha->nvl, name, ha->tag);
if (ha->recursive)
rv = zfs_iter_filesystems_v2(zhp, 0, zfs_hold_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive, int cleanup_fd)
{
int ret;
struct holdarg ha;
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
(void) zfs_hold_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
char errbuf[ERRBUFLEN];
fnvlist_free(ha.nvl);
ret = ENOENT;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s@%s'"),
zhp->zfs_name, snapname);
(void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
return (ret);
}
ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl);
fnvlist_free(ha.nvl);
return (ret);
}
int
zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds)
{
int ret;
nvlist_t *errors;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[ERRBUFLEN];
nvpair_t *elem;
errors = NULL;
ret = lzc_hold(holds, cleanup_fd, &errors);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot hold"));
switch (ret) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s'"), nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case E2BIG:
/*
* Temporary tags wind up having the ds object id
* prepended. So even if we passed the length check
* above, it's still possible for the tag to wind
* up being slightly too long.
*/
(void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case EEXIST:
(void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
static int
zfs_release_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
nvlist_t *existing_holds;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name)) {
ha->error = EINVAL;
rv = EINVAL;
}
if (lzc_get_holds(name, &existing_holds) != 0) {
ha->error = ENOENT;
} else if (!nvlist_exists(existing_holds, ha->tag)) {
ha->error = ESRCH;
} else {
nvlist_t *torelease = fnvlist_alloc();
fnvlist_add_boolean(torelease, ha->tag);
fnvlist_add_nvlist(ha->nvl, name, torelease);
fnvlist_free(torelease);
}
if (ha->recursive)
rv = zfs_iter_filesystems_v2(zhp, 0, zfs_release_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive)
{
int ret;
struct holdarg ha;
nvlist_t *errors = NULL;
nvpair_t *elem;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[ERRBUFLEN];
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
ha.error = 0;
(void) zfs_release_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
fnvlist_free(ha.nvl);
ret = ha.error;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s@%s'"),
zhp->zfs_name, snapname);
if (ret == ESRCH) {
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
} else {
(void) zfs_standard_error(hdl, ret, errbuf);
}
return (ret);
}
ret = lzc_release(ha.nvl, &errors);
fnvlist_free(ha.nvl);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot release"));
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zfs_standard_error(hdl, errno, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s'"),
nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case ESRCH:
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
int
zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
int nvsz = 2048;
void *nvbuf;
int err = 0;
char errbuf[ERRBUFLEN];
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
tryagain:
nvbuf = malloc(nvsz);
if (nvbuf == NULL) {
err = (zfs_error(hdl, EZFS_NOMEM, zfs_strerror(errno)));
goto out;
}
zc.zc_nvlist_dst_size = nvsz;
zc.zc_nvlist_dst = (uintptr_t)nvbuf;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_GET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOMEM:
free(nvbuf);
nvsz = zc.zc_nvlist_dst_size;
goto tryagain;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
} else {
/* success */
int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
if (rc) {
err = zfs_standard_error_fmt(hdl, rc, dgettext(
TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
}
}
free(nvbuf);
out:
return (err);
}
int
zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *nvbuf;
char errbuf[ERRBUFLEN];
size_t nvsz;
int err;
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
assert(err == 0);
nvbuf = malloc(nvsz);
err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
assert(err == 0);
zc.zc_nvlist_src_size = nvsz;
zc.zc_nvlist_src = (uintptr_t)nvbuf;
zc.zc_perm_action = un;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
free(nvbuf);
return (err);
}
int
zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
{
int err;
char errbuf[ERRBUFLEN];
err = lzc_get_holds(zhp->zfs_name, nvl);
if (err != 0) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
zhp->zfs_name);
switch (err) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
return (err);
}
/*
* The theory of raidz space accounting
*
* The "referenced" property of RAIDZ vdevs is scaled such that a 128KB block
* will "reference" 128KB, even though it allocates more than that, to store the
* parity information (and perhaps skip sectors). This concept of the
* "referenced" (and other DMU space accounting) being lower than the allocated
* space by a constant factor is called "raidz deflation."
*
* As mentioned above, the constant factor for raidz deflation assumes a 128KB
* block size. However, zvols typically have a much smaller block size (default
* 8KB). These smaller blocks may require proportionally much more parity
* information (and perhaps skip sectors). In this case, the change to the
* "referenced" property may be much more than the logical block size.
*
* Suppose a raidz vdev has 5 disks with ashift=12. A 128k block may be written
* as follows.
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D8 | D16 | D24 |
* | P1 | D1 | D9 | D17 | D25 |
* | P2 | D2 | D10 | D18 | D26 |
* | P3 | D3 | D11 | D19 | D27 |
* | P4 | D4 | D12 | D20 | D28 |
* | P5 | D5 | D13 | D21 | D29 |
* | P6 | D6 | D14 | D22 | D30 |
* | P7 | D7 | D15 | D23 | D31 |
* +-------+-------+-------+-------+-------+
*
* Above, notice that 160k was allocated: 8 x 4k parity sectors + 32 x 4k data
* sectors. The dataset's referenced will increase by 128k and the pool's
* allocated and free properties will be adjusted by 160k.
*
* A 4k block written to the same raidz vdev will require two 4k sectors. The
* blank cells represent unallocated space.
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | | | |
* +-------+-------+-------+-------+-------+
*
* Above, notice that the 4k block required one sector for parity and another
* for data. vdev_raidz_asize() will return 8k and as such the pool's allocated
* and free properties will be adjusted by 8k. The dataset will not be charged
* 8k. Rather, it will be charged a value that is scaled according to the
* overhead of the 128k block on the same vdev. This 8k allocation will be
* charged 8k * 128k / 160k. 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as
* calculated in the 128k block example above.
*
* Every raidz allocation is sized to be a multiple of nparity+1 sectors. That
* is, every raidz1 allocation will be a multiple of 2 sectors, raidz2
* allocations are a multiple of 3 sectors, and raidz3 allocations are a
* multiple of 4 sectors. When a block does not fill the required number of
* sectors, skip blocks (sectors) are used.
*
* An 8k block being written to a raidz vdev may be written as follows:
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D1 | S0 | |
* +-------+-------+-------+-------+-------+
*
* In order to maintain the nparity+1 allocation size, a skip block (S0) was
* added. For this 8k block, the pool's allocated and free properties are
* adjusted by 16k and the dataset's referenced is increased by 16k * 128k /
* 160k. Again, 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as calculated in
* the 128k block example above.
*
* The situation is slightly different for dRAID since the minimum allocation
* size is the full group width. The same 8K block above would be written as
* follows in a dRAID group:
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D1 | S0 | S1 |
* +-------+-------+-------+-------+-------+
*
* Compression may lead to a variety of block sizes being written for the same
* volume or file. There is no clear way to reserve just the amount of space
* that will be required, so the worst case (no compression) is assumed.
* Note that metadata blocks will typically be compressed, so the reservation
* size returned by zvol_volsize_to_reservation() will generally be slightly
* larger than the maximum that the volume can reference.
*/
/*
* Derived from function of same name in module/zfs/vdev_raidz.c. Returns the
* amount of space (in bytes) that will be allocated for the specified block
* size. Note that the "referenced" space accounted will be less than this, but
* not necessarily equal to "blksize", due to RAIDZ deflation.
*/
static uint64_t
vdev_raidz_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
uint64_t blksize)
{
uint64_t asize, ndata;
ASSERT3U(ndisks, >, nparity);
ndata = ndisks - nparity;
asize = ((blksize - 1) >> ashift) + 1;
asize += nparity * ((asize + ndata - 1) / ndata);
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
/*
* Derived from function of same name in module/zfs/vdev_draid.c. Returns the
* amount of space (in bytes) that will be allocated for the specified block
* size.
*/
static uint64_t
vdev_draid_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
uint64_t blksize)
{
ASSERT3U(ndisks, >, nparity);
uint64_t ndata = ndisks - nparity;
uint64_t rows = ((blksize - 1) / (ndata << ashift)) + 1;
uint64_t asize = (rows * ndisks) << ashift;
return (asize);
}
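Because the two asize helpers above are static to this file, the sketch below simply repeats their arithmetic in a standalone program for the 5-disk, ashift=12 layouts drawn in the theory comment. It is illustration only; the values it prints correspond to the 160K, 8K and 16K raidz allocations, the full 5-sector dRAID row, and the deflated "referenced" charge described above.

/*
 * Standalone re-derivation (illustration only) of the allocation
 * arithmetic for the 5-disk, ashift=12 raidz1/dRAID examples in the
 * theory comment. The formulas mirror vdev_raidz_asize() and
 * vdev_draid_asize() above.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t
raidz_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
    uint64_t blksize)
{
	uint64_t ndata = ndisks - nparity;
	uint64_t asize = ((blksize - 1) >> ashift) + 1;

	asize += nparity * ((asize + ndata - 1) / ndata);
	asize = ((asize + nparity) / (nparity + 1)) * (nparity + 1);
	return (asize << ashift);
}

static uint64_t
draid_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
    uint64_t blksize)
{
	uint64_t ndata = ndisks - nparity;
	uint64_t rows = ((blksize - 1) / (ndata << ashift)) + 1;

	return ((rows * ndisks) << ashift);
}

int
main(void)
{
	uint64_t tsize = raidz_asize(5, 1, 12, 128 << 10);

	/* 128K block: 8 parity + 32 data sectors = 160K allocated. */
	printf("raidz1 128K -> %llu\n", (unsigned long long)tsize);
	/* 4K block: 1 parity + 1 data sector = 8K allocated. */
	printf("raidz1   4K -> %llu\n",
	    (unsigned long long)raidz_asize(5, 1, 12, 4 << 10));
	/* 8K block: 1 parity + 2 data + 1 skip sector = 16K allocated. */
	printf("raidz1   8K -> %llu\n",
	    (unsigned long long)raidz_asize(5, 1, 12, 8 << 10));
	/* 8K block on a 5-wide dRAID group: one full row of 5 sectors. */
	printf("dRAID    8K -> %llu\n",
	    (unsigned long long)draid_asize(5, 1, 12, 8 << 10));
	/* Deflated "referenced" charge for the 8K raidz block: 16K*128K/160K. */
	printf("referenced  -> %llu\n",
	    (unsigned long long)((16384ULL * (128 << 10)) / tsize));
	return (0);
}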
/*
* Determine how much space will be allocated if it lands on the most space-
* inefficient top-level vdev. Returns the size in bytes required to store one
* copy of the volume data. See theory comment above.
*/
static uint64_t
volsize_from_vdevs(zpool_handle_t *zhp, uint64_t nblocks, uint64_t blksize)
{
nvlist_t *config, *tree, **vdevs;
uint_t nvdevs;
uint64_t ret = 0;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (nblocks * blksize);
}
for (int v = 0; v < nvdevs; v++) {
const char *type;
uint64_t nparity, ashift, asize, tsize;
uint64_t volsize;
if (nvlist_lookup_string(vdevs[v], ZPOOL_CONFIG_TYPE,
&type) != 0)
continue;
if (strcmp(type, VDEV_TYPE_RAIDZ) != 0 &&
strcmp(type, VDEV_TYPE_DRAID) != 0)
continue;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_NPARITY, &nparity) != 0)
continue;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
nvlist_t **disks;
uint_t ndisks;
if (nvlist_lookup_nvlist_array(vdevs[v],
ZPOOL_CONFIG_CHILDREN, &disks, &ndisks) != 0)
continue;
/* allocation size for the "typical" 128k block */
tsize = vdev_raidz_asize(ndisks, nparity, ashift,
SPA_OLD_MAXBLOCKSIZE);
/* allocation size for the blksize block */
asize = vdev_raidz_asize(ndisks, nparity, ashift,
blksize);
} else {
uint64_t ndata;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_DRAID_NDATA, &ndata) != 0)
continue;
/* allocation size for the "typical" 128k block */
tsize = vdev_draid_asize(ndata + nparity, nparity,
ashift, SPA_OLD_MAXBLOCKSIZE);
/* allocation size for the blksize block */
asize = vdev_draid_asize(ndata + nparity, nparity,
ashift, blksize);
}
/*
* Scale this size down as a ratio of 128k / tsize.
* See theory statement above.
*
* Bitshift is to avoid the case of nblocks * asize < tsize
* producing a size of 0.
*/
volsize = (nblocks * asize) / (tsize >> SPA_MINBLOCKSHIFT);
/*
* If we would blow UINT64_MAX with this next multiplication,
* don't.
*/
if (volsize >
(UINT64_MAX / (SPA_OLD_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT)))
volsize = UINT64_MAX;
else
volsize *= (SPA_OLD_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
if (volsize > ret) {
ret = volsize;
}
}
if (ret == 0) {
ret = nblocks * blksize;
}
return (ret);
}
/*
* Convert the zvol's volume size to an appropriate reservation. See theory
* comment above.
*
* Note: If this routine is updated, it is necessary to update the ZFS test
* suite's shell version in reservation.shlib.
*/
uint64_t
zvol_volsize_to_reservation(zpool_handle_t *zph, uint64_t volsize,
nvlist_t *props)
{
uint64_t numdb;
uint64_t nblocks, volblocksize;
int ncopies;
const char *strval;
if (nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
ncopies = atoi(strval);
else
ncopies = 1;
if (nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize) != 0)
volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
nblocks = volsize / volblocksize;
/*
* Metadata defaults to using 128k blocks, not volblocksize blocks. For
* this reason, only the data blocks are scaled based on vdev config.
*/
volsize = volsize_from_vdevs(zph, nblocks, volblocksize);
/* start with metadnode L0-L6 */
numdb = 7;
/* calculate number of indirects */
while (nblocks > 1) {
nblocks += DNODES_PER_LEVEL - 1;
nblocks /= DNODES_PER_LEVEL;
numdb += nblocks;
}
numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
volsize *= ncopies;
/*
* this is exactly DN_MAX_INDBLKSHIFT when metadata isn't
* compressed, but in practice they compress down to about
* 1100 bytes
*/
numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
volsize += numdb;
return (volsize);
}
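As a rough reading aid for the metadata estimate above, here is a hedged standalone sketch that replays the indirect-block counting loop for a hypothetical 1 TiB zvol with the default 8K volblocksize, copies=1, and no raidz/draid top-level vdevs (so volsize_from_vdevs() reduces to nblocks * volblocksize). The literal 1024 and 128K stand in for DNODES_PER_LEVEL and 1 << DN_MAX_INDBLKSHIFT and are assumed values, not taken from this change.

/*
 * Illustration only: the indirect-block estimate of
 * zvol_volsize_to_reservation() for a 1 TiB, 8K-volblocksize zvol.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t volsize = 1ULL << 40;	/* 1 TiB */
	const uint64_t volblocksize = 8192;	/* default zvol block size */
	const uint64_t bps_per_indirect = 1024;	/* assumed DNODES_PER_LEVEL */
	const uint64_t ncopies = 1;
	uint64_t nblocks = volsize / volblocksize;
	uint64_t numdb = 7;			/* metadnode L0-L6 */

	/* One indirect block per 1024 data/indirect blocks below it. */
	while (nblocks > 1) {
		nblocks = (nblocks + bps_per_indirect - 1) / bps_per_indirect;
		numdb += nblocks;
	}
	/* MIN(SPA_DVAS_PER_BP, ncopies + 1): metadata gets an extra copy. */
	numdb *= (ncopies + 1 < 3) ? ncopies + 1 : 3;
	/* Worst case: every metadata block is a full, uncompressed 128K. */
	numdb *= 1ULL << 17;

	printf("data: %llu bytes, metadata estimate: %llu bytes\n",
	    (unsigned long long)(volsize * ncopies),
	    (unsigned long long)numdb);
	return (0);
}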
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent fses are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zfs's wait cmd). In that
* scenario, a fs being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the fs doesn't
* exist.
*/
int
zfs_wait_status(zfs_handle_t *zhp, zfs_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait_fs(zhp->zfs_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zfs_standard_error_fmt(zhp->zfs_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in fs '%s'"),
zhp->zfs_name);
}
return (error);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index b9780720e5a3..97920ce6f21c 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -1,5634 +1,5646 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2024, Klara, Inc.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/avl.h>
#include <sys/debug.h>
#include <sys/stat.h>
#include <pthread.h>
#include <umem.h>
#include <time.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include <cityhash.h>
#include <zlib.h>
#include <sys/zio_checksum.h>
#include <sys/dsl_crypt.h>
#include <sys/ddt.h>
#include <sys/socket.h>
#include <sys/sha2.h>
static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **,
const char *, nvlist_t *);
static int guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name);
static int guid_to_name(libzfs_handle_t *, const char *,
uint64_t, boolean_t, char *);
typedef struct progress_arg {
zfs_handle_t *pa_zhp;
int pa_fd;
boolean_t pa_parsable;
boolean_t pa_estimate;
int pa_verbosity;
boolean_t pa_astitle;
boolean_t pa_progress;
uint64_t pa_size;
} progress_arg_t;
static int
dump_record(dmu_replay_record_t *drr, void *payload, size_t payload_len,
zio_cksum_t *zc, int outfd)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
fletcher_4_incremental_native(drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
if (drr->drr_type != DRR_BEGIN) {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
drr_checksum.drr_checksum));
drr->drr_u.drr_checksum.drr_checksum = *zc;
}
fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), zc);
if (write(outfd, drr, sizeof (*drr)) == -1)
return (errno);
if (payload_len != 0) {
fletcher_4_incremental_native(payload, payload_len, zc);
if (write(outfd, payload, payload_len) == -1)
return (errno);
}
return (0);
}
/*
* Routines for dealing with the AVL tree of fs-nvlists
*/
typedef struct fsavl_node {
avl_node_t fn_node;
nvlist_t *fn_nvfs;
const char *fn_snapname;
uint64_t fn_guid;
} fsavl_node_t;
static int
fsavl_compare(const void *arg1, const void *arg2)
{
const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1;
const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2;
return (TREE_CMP(fn1->fn_guid, fn2->fn_guid));
}
/*
* Given the GUID of a snapshot, find its containing filesystem and
* (optionally) name.
*/
static nvlist_t *
fsavl_find(avl_tree_t *avl, uint64_t snapguid, const char **snapname)
{
fsavl_node_t fn_find;
fsavl_node_t *fn;
fn_find.fn_guid = snapguid;
fn = avl_find(avl, &fn_find, NULL);
if (fn) {
if (snapname)
*snapname = fn->fn_snapname;
return (fn->fn_nvfs);
}
return (NULL);
}
static void
fsavl_destroy(avl_tree_t *avl)
{
fsavl_node_t *fn;
void *cookie;
if (avl == NULL)
return;
cookie = NULL;
while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
free(fn);
avl_destroy(avl);
free(avl);
}
/*
* Given an nvlist, produce an avl tree of snapshots, ordered by guid
*/
static avl_tree_t *
fsavl_create(nvlist_t *fss)
{
avl_tree_t *fsavl;
nvpair_t *fselem = NULL;
if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
return (NULL);
avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
offsetof(fsavl_node_t, fn_node));
while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
nvlist_t *nvfs, *snaps;
nvpair_t *snapelem = NULL;
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
while ((snapelem =
nvlist_next_nvpair(snaps, snapelem)) != NULL) {
fsavl_node_t *fn;
if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
fsavl_destroy(fsavl);
return (NULL);
}
fn->fn_nvfs = nvfs;
fn->fn_snapname = nvpair_name(snapelem);
fn->fn_guid = fnvpair_value_uint64(snapelem);
/*
* Note: if there are multiple snaps with the
* same GUID, we ignore all but one.
*/
avl_index_t where = 0;
if (avl_find(fsavl, fn, &where) == NULL)
avl_insert(fsavl, fn, where);
else
free(fn);
}
}
return (fsavl);
}
/*
* Routines for dealing with the giant nvlist of fs-nvlists, etc.
*/
typedef struct send_data {
/*
* assigned inside every recursive call,
* restored from *_save on return:
*
* guid of fromsnap snapshot in parent dataset
* txg of fromsnap snapshot in current dataset
* txg of tosnap snapshot in current dataset
*/
uint64_t parent_fromsnap_guid;
uint64_t fromsnap_txg;
uint64_t tosnap_txg;
/* the nvlists get accumulated during depth-first traversal */
nvlist_t *parent_snaps;
nvlist_t *fss;
nvlist_t *snapprops;
nvlist_t *snapholds; /* user holds */
/* send-receive configuration, does not change during traversal */
const char *fsname;
const char *fromsnap;
const char *tosnap;
boolean_t recursive;
boolean_t raw;
boolean_t doall;
boolean_t replicate;
boolean_t skipmissing;
boolean_t verbose;
boolean_t backup;
boolean_t seenfrom;
boolean_t seento;
boolean_t holds; /* were holds requested with send -h */
boolean_t props;
/*
* The header nvlist is of the following format:
* {
* "tosnap" -> string
* "fromsnap" -> string (if incremental)
* "fss" -> {
* id -> {
*
* "name" -> string (full name; for debugging)
* "parentfromsnap" -> number (guid of fromsnap in parent)
*
* "props" -> { name -> value (only if set here) }
* "snaps" -> { name (lastname) -> number (guid) }
* "snapprops" -> { name (lastname) -> { name -> value } }
* "snapholds" -> { name (lastname) -> { holdname -> crtime } }
*
* "origin" -> number (guid) (if clone)
* "is_encroot" -> boolean
* "sent" -> boolean (not on-disk)
* }
* }
* }
*
*/
} send_data_t;
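Purely as a reading aid for the header-nvlist format documented above, here is a hedged sketch that builds a one-filesystem, one-snapshot instance of that layout with the fnvlist helpers. The dataset name and guid are invented; only the key names and nesting follow the comment.

/*
 * Reading aid only: a minimal instance of the header nvlist
 * documented above, with invented dataset name and guid.
 */
#include <libnvpair.h>

static nvlist_t *
example_send_header(void)
{
	nvlist_t *hdr = fnvlist_alloc();
	nvlist_t *fss = fnvlist_alloc();
	nvlist_t *fs = fnvlist_alloc();
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_string(hdr, "tosnap", "today");

	fnvlist_add_string(fs, "name", "tank/data");	/* for debugging */
	fnvlist_add_uint64(fs, "parentfromsnap", 0);	/* not incremental */
	fnvlist_add_uint64(snaps, "today", 0x1234);	/* lastname -> guid */
	fnvlist_add_nvlist(fs, "snaps", snaps);
	fnvlist_free(snaps);

	fnvlist_add_nvlist(fss, "0x1234", fs);		/* keyed by guid */
	fnvlist_free(fs);
	fnvlist_add_nvlist(hdr, "fss", fss);
	fnvlist_free(fss);
	return (hdr);
}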
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);
/*
* Collect guid, valid props, optionally holds, etc. of a snapshot.
* This interface is intended for use as a zfs_iter_snapshots_v2_sorted visitor.
*/
static int
send_iterate_snap(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
boolean_t isfromsnap, istosnap, istosnapwithnofrom;
char *snapname;
const char *from = sd->fromsnap;
const char *to = sd->tosnap;
snapname = strrchr(zhp->zfs_name, '@');
assert(snapname != NULL);
++snapname;
isfromsnap = (from != NULL && strcmp(from, snapname) == 0);
istosnap = (to != NULL && strcmp(to, snapname) == 0);
istosnapwithnofrom = (istosnap && from == NULL);
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping snapshot %s because it was created "
"after the destination snapshot (%s)\n"),
zhp->zfs_name, to);
}
zfs_close(zhp);
return (0);
}
fnvlist_add_uint64(sd->parent_snaps, snapname, guid);
/*
* NB: if there is no fromsnap here (it's a newly created fs in
* an incremental replication), we will substitute the tosnap.
*/
if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap))
sd->parent_fromsnap_guid = guid;
if (!sd->recursive) {
/*
* To allow a doall stream to work properly
* with a NULL fromsnap
*/
if (sd->doall && from == NULL && !sd->seenfrom)
sd->seenfrom = B_TRUE;
if (!sd->seenfrom && isfromsnap) {
sd->seenfrom = B_TRUE;
zfs_close(zhp);
return (0);
}
if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) {
zfs_close(zhp);
return (0);
}
if (istosnap)
sd->seento = B_TRUE;
}
nvlist_t *nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(sd->snapprops, snapname, nv);
fnvlist_free(nv);
if (sd->holds) {
nvlist_t *holds;
if (lzc_get_holds(zhp->zfs_name, &holds) == 0) {
fnvlist_add_nvlist(sd->snapholds, snapname, holds);
fnvlist_free(holds);
}
}
zfs_close(zhp);
return (0);
}
/*
* Collect all valid props from the handle snap into an nvlist.
*/
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv)
{
nvlist_t *props;
if (received_only)
props = zfs_get_recvd_props(zhp);
else
props = zhp->zfs_props;
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
zfs_prop_t prop = zfs_name_to_prop(propname);
if (!zfs_prop_user(propname)) {
/*
* Realistically, this should never happen. However,
* we want the ability to add DSL properties without
* needing to make incompatible version changes. We
* need to ignore unknown properties to allow older
* software to still send datasets containing these
* properties, with the unknown properties elided.
*/
if (prop == ZPROP_INVAL)
continue;
if (zfs_prop_readonly(prop))
continue;
}
nvlist_t *propnv = fnvpair_value_nvlist(elem);
boolean_t isspacelimit = (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION);
if (isspacelimit && zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
continue;
const char *source;
if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) == 0) {
if (strcmp(source, zhp->zfs_name) != 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
} else {
/*
* May have no source before SPA_VERSION_RECVD_PROPS,
* but is still modifiable.
*/
if (!isspacelimit)
continue;
}
if (zfs_prop_user(propname) ||
zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
const char *value;
value = fnvlist_lookup_string(propnv, ZPROP_VALUE);
fnvlist_add_string(nv, propname, value);
} else {
uint64_t value;
value = fnvlist_lookup_uint64(propnv, ZPROP_VALUE);
fnvlist_add_uint64(nv, propname, value);
}
}
}
/*
* Returns the snapshot guid, or 0 if the snapshot does not exist.
*/
static uint64_t
get_snap_guid(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[MAXPATHLEN + 1];
uint64_t guid = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (guid);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
guid = zfs_prop_get_int(zhp, ZFS_PROP_GUID);
zfs_close(zhp);
}
return (guid);
}
/*
* Returns the snapshot creation txg, or 0 if the snapshot does not exist.
*/
static uint64_t
get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t txg = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (txg);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) {
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG);
zfs_close(zhp);
}
}
return (txg);
}
/*
* Recursively generate nvlists describing datasets. See comment
* for the data structure send_data_t above for description of contents
* of the nvlist.
*/
static int
send_iterate_fs(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
nvlist_t *nvfs = NULL, *nv = NULL;
int rv = 0;
uint64_t min_txg = 0, max_txg = 0;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t fromsnap_txg, tosnap_txg;
char guidstring[64];
/* These fields are restored on return from a recursive call. */
uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
uint64_t fromsnap_txg_save = sd->fromsnap_txg;
uint64_t tosnap_txg_save = sd->tosnap_txg;
fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap);
if (fromsnap_txg != 0)
sd->fromsnap_txg = fromsnap_txg;
tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap);
if (tosnap_txg != 0)
sd->tosnap_txg = tosnap_txg;
/*
* On the send side, if the current dataset does not have tosnap,
* perform two additional checks:
*
* - Skip sending the current dataset if it was created later than
* the parent tosnap.
* - Return error if the current dataset was created earlier than
* the parent tosnap, unless --skip-missing is specified. Then
* just print a warning.
*/
if (sd->tosnap != NULL && tosnap_txg == 0) {
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping dataset %s: snapshot %s does "
"not exist\n"), zhp->zfs_name, sd->tosnap);
}
} else if (sd->skipmissing) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: skipping dataset %s and its children:"
" snapshot %s does not exist\n"),
zhp->zfs_name, sd->tosnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s%s: snapshot %s@%s does not "
"exist\n"), sd->fsname, sd->tosnap, sd->recursive ?
dgettext(TEXT_DOMAIN, " recursively") : "",
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOENT;
}
goto out;
}
nvfs = fnvlist_alloc();
fnvlist_add_string(nvfs, "name", zhp->zfs_name);
fnvlist_add_uint64(nvfs, "parentfromsnap", sd->parent_fromsnap_guid);
if (zhp->zfs_dmustats.dds_origin[0] != '\0') {
zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
if (origin == NULL) {
rv = -1;
goto out;
}
fnvlist_add_uint64(nvfs, "origin",
origin->zfs_dmustats.dds_guid);
zfs_close(origin);
}
/* Iterate over props. */
if (sd->props || sd->backup || sd->recursive) {
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(nvfs, "props", nv);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
boolean_t encroot;
/* Determine if this dataset is an encryption root. */
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) {
rv = -1;
goto out;
}
if (encroot)
fnvlist_add_boolean(nvfs, "is_encroot");
/*
* Encrypted datasets can only be sent with properties if
* the raw flag is specified because the receive side doesn't
* currently have a mechanism for recursively asking the user
* for new encryption parameters.
*/
if (!sd->raw) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s: encrypted dataset %s may not "
"be sent with properties without the raw flag\n"),
sd->fsname, sd->tosnap, zhp->zfs_name);
rv = -1;
goto out;
}
}
/*
* Iterate over snaps, and set sd->parent_fromsnap_guid.
*
* If this is a "doall" send, a replicate send or we're just trying
* to gather a list of previous snapshots, iterate through all the
* snaps in the txg range. Otherwise just look at the one we're
* interested in.
*/
sd->parent_fromsnap_guid = 0;
sd->parent_snaps = fnvlist_alloc();
sd->snapprops = fnvlist_alloc();
if (sd->holds)
sd->snapholds = fnvlist_alloc();
if (sd->doall || sd->replicate || sd->tosnap == NULL) {
if (!sd->replicate && fromsnap_txg != 0)
min_txg = fromsnap_txg;
if (!sd->replicate && tosnap_txg != 0)
max_txg = tosnap_txg;
(void) zfs_iter_snapshots_sorted_v2(zhp, 0, send_iterate_snap,
sd, min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
(void) snprintf(snapname, sizeof (snapname), "%s@%s",
zhp->zfs_name, sd->tosnap);
if (sd->fromsnap != NULL)
sd->seenfrom = B_TRUE;
snap = zfs_open(zhp->zfs_hdl, snapname, ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
(void) send_iterate_snap(snap, sd);
}
fnvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps);
fnvlist_free(sd->parent_snaps);
fnvlist_add_nvlist(nvfs, "snapprops", sd->snapprops);
fnvlist_free(sd->snapprops);
if (sd->holds) {
fnvlist_add_nvlist(nvfs, "snapholds", sd->snapholds);
fnvlist_free(sd->snapholds);
}
/* Do not allow the size of the properties list to exceed the limit */
if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"warning: cannot send %s@%s: the size of the list of "
"snapshots and properties is too large to be received "
"successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOSPC;
goto out;
}
/* Add this fs to nvlist. */
(void) snprintf(guidstring, sizeof (guidstring),
"0x%llx", (longlong_t)guid);
fnvlist_add_nvlist(sd->fss, guidstring, nvfs);
/* Iterate over children. */
if (sd->recursive)
rv = zfs_iter_filesystems_v2(zhp, 0, send_iterate_fs, sd);
out:
/* Restore saved fields. */
sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
sd->fromsnap_txg = fromsnap_txg_save;
sd->tosnap_txg = tosnap_txg_save;
fnvlist_free(nv);
fnvlist_free(nvfs);
zfs_close(zhp);
return (rv);
}
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t doall,
boolean_t replicate, boolean_t skipmissing, boolean_t verbose,
boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
avl_tree_t **avlp)
{
zfs_handle_t *zhp;
send_data_t sd = { 0 };
int error;
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (EZFS_BADTYPE);
sd.fss = fnvlist_alloc();
sd.fsname = fsname;
sd.fromsnap = fromsnap;
sd.tosnap = tosnap;
sd.recursive = recursive;
sd.raw = raw;
sd.doall = doall;
sd.replicate = replicate;
sd.skipmissing = skipmissing;
sd.verbose = verbose;
sd.backup = backup;
sd.holds = holds;
sd.props = props;
if ((error = send_iterate_fs(zhp, &sd)) != 0) {
fnvlist_free(sd.fss);
if (avlp != NULL)
*avlp = NULL;
*nvlp = NULL;
return (error);
}
if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
fnvlist_free(sd.fss);
*nvlp = NULL;
return (EZFS_NOMEM);
}
*nvlp = sd.fss;
return (0);
}
/*
* Routines specific to "zfs send"
*/
typedef struct send_dump_data {
/* these are all just the short snapname (the part after the @) */
const char *fromsnap;
const char *tosnap;
char prevsnap[ZFS_MAX_DATASET_NAME_LEN];
uint64_t prevsnap_obj;
boolean_t seenfrom, seento, replicate, doall, fromorigin;
boolean_t dryrun, parsable, progress, embed_data, std_out;
boolean_t large_block, compress, raw, holds;
boolean_t progressastitle;
int outfd;
boolean_t err;
nvlist_t *fss;
nvlist_t *snapholds;
avl_tree_t *fsavl;
snapfilter_cb_t *filter_cb;
void *filter_cb_arg;
nvlist_t *debugnv;
char holdtag[ZFS_MAX_DATASET_NAME_LEN];
int cleanup_fd;
int verbosity;
uint64_t size;
} send_dump_data_t;
static int
zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from,
enum lzc_send_flags flags, uint64_t *spacep)
{
assert(snapname != NULL);
int error = lzc_send_space(snapname, from, flags, spacep);
if (error == 0)
return (0);
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot estimate space for '%s'"), snapname);
libzfs_handle_t *hdl = zhp->zfs_hdl;
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, snapname,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
snapname);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", zfs_strerror(error));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, error, errbuf));
}
}
/*
* Dumps a backup of the given snapshot (incremental from fromsnap if it's not
* NULL) to the file descriptor specified by outfd.
*/
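/*
* The relevant zfs_cmd_t fields for ZFS_IOC_SEND, as filled in below:
* zc_name is the snapshot being sent, zc_cookie the output fd,
* zc_sendobj the objset id of that snapshot, zc_fromobj the objset id
* of the incremental source (0 for a full send), zc_obj whether to send
* an incremental from the origin, and zc_flags the lzc_send_flags.
*/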
static int
dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
boolean_t fromorigin, int outfd, enum lzc_send_flags flags,
nvlist_t *debugnv)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *thisdbg;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
assert(fromsnap_obj == 0 || !fromorigin);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = outfd;
zc.zc_obj = fromorigin;
zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zc.zc_fromobj = fromsnap_obj;
zc.zc_flags = flags;
if (debugnv != NULL) {
thisdbg = fnvlist_alloc();
if (fromsnap != NULL && fromsnap[0] != '\0')
fnvlist_add_string(thisdbg, "fromsnap", fromsnap);
}
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
char errbuf[ERRBUFLEN];
int error = errno;
(void) snprintf(errbuf, sizeof (errbuf), "%s '%s'",
dgettext(TEXT_DOMAIN, "warning: cannot send"),
zhp->zfs_name);
if (debugnv != NULL) {
fnvlist_add_uint64(thisdbg, "error", error);
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
}
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, zc.zc_name,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (@%s) does not exist"),
zc.zc_value);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", zfs_strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
if (debugnv != NULL) {
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
}
return (0);
}
static void
gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
/*
* zfs_send() only sets snapholds for sends that need them,
* e.g. replication and doall.
*/
if (sdd->snapholds == NULL)
return;
fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag);
}
int
zfs_send_progress(zfs_handle_t *zhp, int fd, uint64_t *bytes_written,
uint64_t *blocks_visited)
{
zfs_cmd_t zc = {"\0"};
if (bytes_written != NULL)
*bytes_written = 0;
if (blocks_visited != NULL)
*blocks_visited = 0;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = fd;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
return (errno);
if (bytes_written != NULL)
*bytes_written = zc.zc_cookie;
if (blocks_visited != NULL)
*blocks_visited = zc.zc_objset_type;
return (0);
}
static volatile boolean_t send_progress_thread_signal_duetotimer;
static void
send_progress_thread_act(int sig, siginfo_t *info, void *ucontext)
{
(void) sig, (void) ucontext;
send_progress_thread_signal_duetotimer = info->si_code == SI_TIMER;
}
struct timer_desirability {
timer_t timer;
boolean_t desired;
};
static void
timer_delete_cleanup(void *timer)
{
struct timer_desirability *td = timer;
if (td->desired)
timer_delete(td->timer);
}
#ifdef SIGINFO
#define SEND_PROGRESS_THREAD_PARENT_BLOCK_SIGINFO sigaddset(&new, SIGINFO)
#else
#define SEND_PROGRESS_THREAD_PARENT_BLOCK_SIGINFO
#endif
#define SEND_PROGRESS_THREAD_PARENT_BLOCK(old) { \
sigset_t new; \
sigemptyset(&new); \
sigaddset(&new, SIGUSR1); \
SEND_PROGRESS_THREAD_PARENT_BLOCK_SIGINFO; \
pthread_sigmask(SIG_BLOCK, &new, old); \
}
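/*
* The sending thread blocks SIGUSR1 (and SIGINFO, where available) with
* this macro right after starting send_progress_thread(), so that the
* periodic timer signal and any user status request are delivered to the
* progress thread, which waits for them in pause(), rather than to the
* sender itself.
*/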
static void *
send_progress_thread(void *arg)
{
progress_arg_t *pa = arg;
zfs_handle_t *zhp = pa->pa_zhp;
uint64_t bytes;
uint64_t blocks;
uint64_t total = pa->pa_size / 100;
char buf[16];
time_t t;
struct tm tm;
int err;
const struct sigaction signal_action =
{.sa_sigaction = send_progress_thread_act, .sa_flags = SA_SIGINFO};
struct sigevent timer_cfg =
{.sigev_notify = SIGEV_SIGNAL, .sigev_signo = SIGUSR1};
const struct itimerspec timer_time =
{.it_value = {.tv_sec = 1}, .it_interval = {.tv_sec = 1}};
struct timer_desirability timer = {};
sigaction(SIGUSR1, &signal_action, NULL);
#ifdef SIGINFO
sigaction(SIGINFO, &signal_action, NULL);
#endif
if ((timer.desired = pa->pa_progress || pa->pa_astitle)) {
if (timer_create(CLOCK_MONOTONIC, &timer_cfg, &timer.timer))
return ((void *)(uintptr_t)errno);
(void) timer_settime(timer.timer, 0, &timer_time, NULL);
}
pthread_cleanup_push(timer_delete_cleanup, &timer);
if (!pa->pa_parsable && pa->pa_progress) {
(void) fprintf(stderr,
"TIME %s %sSNAPSHOT %s\n",
pa->pa_estimate ? "BYTES" : " SENT",
pa->pa_verbosity >= 2 ? " BLOCKS " : "",
zhp->zfs_name);
}
/*
* Print the progress from ZFS_IOC_SEND_PROGRESS every second.
*/
for (;;) {
pause();
if ((err = zfs_send_progress(zhp, pa->pa_fd, &bytes,
&blocks)) != 0) {
if (err == EINTR || err == ENOENT)
err = 0;
pthread_exit(((void *)(uintptr_t)err));
}
(void) time(&t);
localtime_r(&t, &tm);
if (pa->pa_astitle) {
char buf_bytes[16];
char buf_size[16];
int pct;
zfs_nicenum(bytes, buf_bytes, sizeof (buf_bytes));
zfs_nicenum(pa->pa_size, buf_size, sizeof (buf_size));
pct = (total > 0) ? bytes / total : 100;
zfs_setproctitle("sending %s (%d%%: %s/%s)",
zhp->zfs_name, MIN(pct, 100), buf_bytes, buf_size);
}
if (pa->pa_verbosity >= 2 && pa->pa_parsable) {
(void) fprintf(stderr,
"%02d:%02d:%02d\t%llu\t%llu\t%s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
(u_longlong_t)bytes, (u_longlong_t)blocks,
zhp->zfs_name);
} else if (pa->pa_verbosity >= 2) {
zfs_nicenum(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"%02d:%02d:%02d %5s %8llu %s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
buf, (u_longlong_t)blocks, zhp->zfs_name);
} else if (pa->pa_parsable) {
(void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
(u_longlong_t)bytes, zhp->zfs_name);
} else if (pa->pa_progress ||
!send_progress_thread_signal_duetotimer) {
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
buf, zhp->zfs_name);
}
}
pthread_cleanup_pop(B_TRUE);
return (NULL);
}
static boolean_t
send_progress_thread_exit(
libzfs_handle_t *hdl, pthread_t ptid, sigset_t *oldmask)
{
void *status = NULL;
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
pthread_sigmask(SIG_SETMASK, oldmask, NULL);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED)
return (zfs_standard_error(hdl, error,
dgettext(TEXT_DOMAIN, "progress thread exited nonzero")));
else
return (B_FALSE);
}
static void
send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
uint64_t size, boolean_t parsable)
{
if (parsable) {
if (fromsnap != NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"incremental\t%s\t%s"), fromsnap, tosnap);
} else {
/*
* Workaround for deficiencies in GCC 12+ with UBSan enabled.
*
* GCC 12+ invoked with -fsanitize=undefined incorrectly reports the code
* below as violating -Wformat-overflow.
*/
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-overflow"
#endif
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full\t%s"), tosnap);
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic pop
#endif
}
(void) fprintf(fout, "\t%llu", (longlong_t)size);
} else {
if (fromsnap != NULL) {
if (strchr(fromsnap, '@') == NULL &&
strchr(fromsnap, '#') == NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from @%s to %s"), fromsnap, tosnap);
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from %s to %s"), fromsnap, tosnap);
}
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full send of %s"), tosnap);
}
if (size != 0) {
char buf[16];
zfs_nicebytes(size, buf, sizeof (buf));
/*
* Workaround for deficiencies in GCC 12+ with UBSan enabled.
*
* GCC 12+ invoked with -fsanitize=undefined incorrectly reports the code
* below as violating -Wformat-overflow.
*/
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-overflow"
#endif
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
" estimated size is %s"), buf);
#if defined(__GNUC__) && !defined(__clang__) && \
defined(ZFS_UBSAN_ENABLED) && defined(HAVE_FORMAT_OVERFLOW)
#pragma GCC diagnostic pop
#endif
}
}
(void) fprintf(fout, "\n");
}
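/*
* Example output (names and sizes are illustrative):
*   parsable:        incremental\t<fromsnap>\t<tosnap>\t<size in bytes>
*   human-readable:  send from @<fromsnap> to <tosnap> estimated size is 1.21M
*/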
/*
* Send a single filesystem snapshot, updating the send dump data.
* This interface is intended for use as a zfs_iter_snapshots_sorted_v2
* visitor.
*/
static int
dump_snapshot(zfs_handle_t *zhp, void *arg)
{
send_dump_data_t *sdd = arg;
progress_arg_t pa = { 0 };
pthread_t tid;
char *thissnap;
enum lzc_send_flags flags = 0;
int err;
boolean_t isfromsnap, istosnap, fromorigin;
boolean_t exclude = B_FALSE;
FILE *fout = sdd->std_out ? stdout : stderr;
err = 0;
thissnap = strchr(zhp->zfs_name, '@') + 1;
isfromsnap = (sdd->fromsnap != NULL &&
strcmp(sdd->fromsnap, thissnap) == 0);
if (!sdd->seenfrom && isfromsnap) {
gather_holds(zhp, sdd);
sdd->seenfrom = B_TRUE;
(void) strlcpy(sdd->prevsnap, thissnap, sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (0);
}
if (sdd->seento || !sdd->seenfrom) {
zfs_close(zhp);
return (0);
}
istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
if (istosnap)
sdd->seento = B_TRUE;
if (sdd->large_block)
flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (sdd->embed_data)
flags |= LZC_SEND_FLAG_EMBED_DATA;
if (sdd->compress)
flags |= LZC_SEND_FLAG_COMPRESS;
if (sdd->raw)
flags |= LZC_SEND_FLAG_RAW;
if (!sdd->doall && !isfromsnap && !istosnap) {
if (sdd->replicate) {
const char *snapname;
nvlist_t *snapprops;
/*
* Filter out all intermediate snapshots except origin
* snapshots needed to replicate clones.
*/
nvlist_t *nvfs = fsavl_find(sdd->fsavl,
zhp->zfs_dmustats.dds_guid, &snapname);
if (nvfs != NULL) {
snapprops = fnvlist_lookup_nvlist(nvfs,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
thissnap);
exclude = !nvlist_exists(snapprops,
"is_clone_origin");
}
} else {
exclude = B_TRUE;
}
}
/*
* If a filter function exists, call it to determine whether
* this snapshot will be sent.
*/
if (exclude || (sdd->filter_cb != NULL &&
sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
/*
* This snapshot is filtered out. Don't send it, and don't
* set prevsnap_obj, so it will be as if this snapshot didn't
* exist, and the next accepted snapshot will be sent as
* an incremental from the last accepted one, or as the
* first (and full) snapshot in the case of a non-incremental
* replication send.
*/
zfs_close(zhp);
return (0);
}
gather_holds(zhp, sdd);
fromorigin = sdd->prevsnap[0] == '\0' &&
(sdd->fromorigin || sdd->replicate);
if (sdd->verbosity != 0) {
uint64_t size = 0;
char fromds[ZFS_MAX_DATASET_NAME_LEN];
if (sdd->prevsnap[0] != '\0') {
(void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds));
*(strchr(fromds, '@') + 1) = '\0';
(void) strlcat(fromds, sdd->prevsnap, sizeof (fromds));
}
if (zfs_send_space(zhp, zhp->zfs_name,
sdd->prevsnap[0] ? fromds : NULL, flags, &size) == 0) {
send_print_verbose(fout, zhp->zfs_name,
sdd->prevsnap[0] ? sdd->prevsnap : NULL,
size, sdd->parsable);
sdd->size += size;
}
}
if (!sdd->dryrun) {
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
sigset_t oldmask;
{
pa.pa_zhp = zhp;
pa.pa_fd = sdd->outfd;
pa.pa_parsable = sdd->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = sdd->verbosity;
pa.pa_size = sdd->size;
pa.pa_astitle = sdd->progressastitle;
pa.pa_progress = sdd->progress;
if ((err = pthread_create(&tid, NULL,
send_progress_thread, &pa)) != 0) {
zfs_close(zhp);
return (err);
}
SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
fromorigin, sdd->outfd, flags, sdd->debugnv);
if (send_progress_thread_exit(zhp->zfs_hdl, tid, &oldmask))
return (-1);
}
(void) strlcpy(sdd->prevsnap, thissnap, sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (err);
}
/*
* Send all snapshots for a filesystem, updating the send dump data.
*/
static int
dump_filesystem(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
int rv = 0;
boolean_t missingfrom = B_FALSE;
zfs_cmd_t zc = {"\0"};
uint64_t min_txg = 0, max_txg = 0;
/*
* Make sure the tosnap exists.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->tosnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
sdd->err = B_TRUE;
return (0);
}
/*
* If this fs does not have fromsnap, and we're doing a
* recursive send, we need to send a full stream from the
* beginning (or an incremental from the origin if this
* is a clone). If we're doing a non-recursive send, then
* let the caller get the error.
*/
if (sdd->replicate && sdd->fromsnap) {
/*
* Make sure the fromsnap exists.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->fromsnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0)
missingfrom = B_TRUE;
}
sdd->seenfrom = sdd->seento = B_FALSE;
sdd->prevsnap[0] = '\0';
sdd->prevsnap_obj = 0;
if (sdd->fromsnap == NULL || missingfrom)
sdd->seenfrom = B_TRUE;
/*
* Iterate through all snapshots and process the ones we will be
* sending. If we only have a "from" and "to" snapshot to deal
* with, we can avoid iterating through all the other snapshots.
*/
if (sdd->doall || sdd->replicate || sdd->tosnap == NULL) {
if (!sdd->replicate) {
if (sdd->fromsnap != NULL) {
min_txg = get_snap_txg(zhp->zfs_hdl,
zhp->zfs_name, sdd->fromsnap);
}
if (sdd->tosnap != NULL) {
max_txg = get_snap_txg(zhp->zfs_hdl,
zhp->zfs_name, sdd->tosnap);
}
}
rv = zfs_iter_snapshots_sorted_v2(zhp, 0, dump_snapshot, sdd,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
/* Dump fromsnap. */
if (!sdd->seenfrom) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->fromsnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = errno;
}
/* Dump tosnap. */
if (rv == 0) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->tosnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = errno;
}
}
if (!sdd->seenfrom) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) does not exist\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
sdd->err = B_TRUE;
} else if (!sdd->seento) {
if (sdd->fromsnap) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) "
"is not earlier than it\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: "
"could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
}
sdd->err = B_TRUE;
}
return (rv);
}
/*
* Send all snapshots for all filesystems in sdd.
*/
static int
dump_filesystems(zfs_handle_t *rzhp, send_dump_data_t *sdd)
{
nvpair_t *fspair;
boolean_t needagain, progress;
if (!sdd->replicate)
return (dump_filesystem(rzhp, sdd));
/* Mark the clone origin snapshots. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *nvfs;
uint64_t origin_guid = 0;
nvfs = fnvpair_value_nvlist(fspair);
(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
if (origin_guid != 0) {
const char *snapname;
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, &snapname);
if (origin_nv != NULL) {
nvlist_t *snapprops;
snapprops = fnvlist_lookup_nvlist(origin_nv,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
snapname);
fnvlist_add_boolean(snapprops,
"is_clone_origin");
}
}
}
again:
needagain = progress = B_FALSE;
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist, *parent_nv;
const char *fsname;
zfs_handle_t *zhp;
int err;
uint64_t origin_guid = 0;
uint64_t parent_guid = 0;
fslist = fnvpair_value_nvlist(fspair);
if (nvlist_lookup_boolean(fslist, "sent") == 0)
continue;
fsname = fnvlist_lookup_string(fslist, "name");
(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
&parent_guid);
if (parent_guid != 0) {
parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
if (!nvlist_exists(parent_nv, "sent")) {
/* Parent has not been sent; skip this one. */
needagain = B_TRUE;
continue;
}
}
if (origin_guid != 0) {
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, NULL);
if (origin_nv != NULL &&
!nvlist_exists(origin_nv, "sent")) {
/*
* Origin has not been sent yet;
* skip this clone.
*/
needagain = B_TRUE;
continue;
}
}
zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
err = dump_filesystem(zhp, sdd);
fnvlist_add_boolean(fslist, "sent");
progress = B_TRUE;
zfs_close(zhp);
if (err)
return (err);
}
if (needagain) {
assert(progress);
goto again;
}
/* Clean out the sent flags in case we reuse this fss. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist;
fslist = fnvpair_value_nvlist(fspair);
(void) nvlist_remove_all(fslist, "sent");
}
return (0);
}
nvlist_t *
zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token)
{
unsigned int version;
int nread, i;
unsigned long long checksum, packed_len;
/*
* Decode token header, which is:
* <token version>-<checksum of payload>-<uncompressed payload length>
* Note that the only supported token version is 1.
*/
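/*
* For example, a token has the shape (digits are illustrative):
*   "1-<checksum>-<length>-<hex-encoded, zlib-compressed packed nvlist>"
* The payload is hex-decoded, checksummed, decompressed and unpacked below.
*/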
nread = sscanf(token, "%u-%llx-%llx-",
&version, &checksum, &packed_len);
if (nread != 3) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid format)"));
return (NULL);
}
if (version != ZFS_SEND_RESUME_TOKEN_VERSION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid version %u)"),
version);
return (NULL);
}
/* Convert hexadecimal representation to binary. */
token = strrchr(token, '-') + 1;
int len = strlen(token) / 2;
unsigned char *compressed = zfs_alloc(hdl, len);
for (i = 0; i < len; i++) {
nread = sscanf(token + i * 2, "%2hhx", compressed + i);
if (nread != 1) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt "
"(payload is not hex-encoded)"));
return (NULL);
}
}
/* Verify checksum. */
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, len, &cksum);
if (cksum.zc_word[0] != checksum) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (incorrect checksum)"));
return (NULL);
}
/* Uncompress. */
void *packed = zfs_alloc(hdl, packed_len);
uLongf packed_len_long = packed_len;
if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK ||
packed_len_long != packed_len) {
free(packed);
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (decompression failed)"));
return (NULL);
}
/* Unpack nvlist. */
nvlist_t *nv;
int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP);
free(packed);
free(compressed);
if (error != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (nvlist_unpack failed)"));
return (NULL);
}
return (nv);
}
static enum lzc_send_flags
lzc_flags_from_sendflags(const sendflags_t *flags)
{
enum lzc_send_flags lzc_flags = 0;
if (flags->largeblock)
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data)
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress)
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw)
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved)
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
estimate_size(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
uint64_t resumeobj, uint64_t resumeoff, uint64_t bytes,
const char *redactbook, char *errbuf, uint64_t *sizep)
{
uint64_t size;
FILE *fout = flags->dryrun ? stdout : stderr;
progress_arg_t pa = { 0 };
int err = 0;
pthread_t ptid;
sigset_t oldmask;
{
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_TRUE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", zfs_strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
err = lzc_send_space_resume_redacted(zhp->zfs_name, from,
lzc_flags_from_sendflags(flags), resumeobj, resumeoff, bytes,
redactbook, fd, &size);
*sizep = size;
if (send_progress_thread_exit(zhp->zfs_hdl, ptid, &oldmask))
return (-1);
if (!flags->progress && !flags->parsable)
return (err);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", zfs_strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
send_print_verbose(fout, zhp->zfs_name, from, size,
flags->parsable);
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n", (longlong_t)size);
} else {
char buf[16];
zfs_nicenum(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
return (0);
}
static boolean_t
redact_snaps_contains(const uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
redact_snaps_equal(const uint64_t *snaps1, uint64_t num_snaps1,
const uint64_t *snaps2, uint64_t num_snaps2)
{
if (num_snaps1 != num_snaps2)
return (B_FALSE);
for (int i = 0; i < num_snaps1; i++) {
if (!redact_snaps_contains(snaps2, num_snaps2, snaps1[i]))
return (B_FALSE);
}
return (B_TRUE);
}
static int
get_bookmarks(const char *path, nvlist_t **bmarksp)
{
nvlist_t *props = fnvlist_alloc();
int error;
fnvlist_add_boolean(props, "redact_complete");
fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
error = lzc_get_bookmarks(path, props, bmarksp);
fnvlist_free(props);
return (error);
}
static nvpair_t *
find_redact_pair(nvlist_t *bmarks, const uint64_t *redact_snap_guids,
int num_redact_snaps)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(bmarks, NULL); pair;
pair = nvlist_next_nvpair(bmarks, pair)) {
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
uint_t len = 0;
uint64_t *bmarksnaps = fnvlist_lookup_uint64_array(vallist,
ZPROP_VALUE, &len);
if (redact_snaps_equal(redact_snap_guids,
num_redact_snaps, bmarksnaps, len)) {
break;
}
}
return (pair);
}
static boolean_t
get_redact_complete(nvpair_t *pair)
{
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark, "redact_complete");
boolean_t complete = fnvlist_lookup_boolean_value(vallist,
ZPROP_VALUE);
return (complete);
}
/*
* Check that the list of redaction snapshots in the bookmark matches the send
* we're resuming, and return whether or not it's complete.
*
* Note that the caller needs to free the contents of *bookname with free() if
* this function returns successfully.
*/
static int
find_redact_book(libzfs_handle_t *hdl, const char *path,
const uint64_t *redact_snap_guids, int num_redact_snaps,
char **bookname)
{
char errbuf[ERRBUFLEN];
nvlist_t *bmarks;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
int error = get_bookmarks(path, &bmarks);
if (error != 0) {
if (error == ESRCH) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"nonexistent redaction bookmark provided"));
} else if (error == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset to be sent no longer exists"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unknown error: %s"), zfs_strerror(error));
}
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
nvpair_t *pair = find_redact_pair(bmarks, redact_snap_guids,
num_redact_snaps);
if (pair == NULL) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no appropriate redaction bookmark exists"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
boolean_t complete = get_redact_complete(pair);
if (!complete) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incomplete redaction bookmark provided"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
*bookname = strndup(nvpair_name(pair), ZFS_MAX_DATASET_NAME_LEN);
ASSERT3P(*bookname, !=, NULL);
fnvlist_free(bmarks);
return (0);
}
static enum lzc_send_flags
lzc_flags_from_resume_nvl(nvlist_t *resume_nvl)
{
enum lzc_send_flags lzc_flags = 0;
if (nvlist_exists(resume_nvl, "largeblockok"))
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (nvlist_exists(resume_nvl, "embedok"))
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (nvlist_exists(resume_nvl, "compressok"))
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (nvlist_exists(resume_nvl, "rawok"))
lzc_flags |= LZC_SEND_FLAG_RAW;
if (nvlist_exists(resume_nvl, "savedok"))
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
zfs_send_resume_impl_cb_impl(libzfs_handle_t *hdl, sendflags_t *flags,
int outfd, nvlist_t *resume_nvl)
{
char errbuf[ERRBUFLEN];
const char *toname;
const char *fromname = NULL;
uint64_t resumeobj, resumeoff, toguid, fromguid, bytes;
zfs_handle_t *zhp;
int error = 0;
char name[ZFS_MAX_DATASET_NAME_LEN];
FILE *fout = (flags->verbosity > 0 && flags->dryrun) ? stdout : stderr;
uint64_t *redact_snap_guids = NULL;
int num_redact_snaps = 0;
char *redact_book = NULL;
uint64_t size = 0;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
if (flags->verbosity != 0) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"resume token contents:\n"));
nvlist_print(fout, resume_nvl);
}
if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 ||
nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt"));
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
fromguid = 0;
(void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid);
if (flags->saved) {
(void) strlcpy(name, toname, sizeof (name));
} else {
error = guid_to_name(hdl, toname, toguid, B_FALSE, name);
if (error != 0) {
if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is no longer the same snapshot "
"used in the initial send"), toname);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' used in the initial send no "
"longer exists"), toname);
}
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
}
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unable to access '%s'"), name);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
if (nvlist_lookup_uint64_array(resume_nvl, "book_redact_snaps",
&redact_snap_guids, (uint_t *)&num_redact_snaps) != 0) {
num_redact_snaps = -1;
}
if (fromguid != 0) {
if (guid_to_name_redact_snaps(hdl, toname, fromguid, B_TRUE,
redact_snap_guids, num_redact_snaps, name) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source %#llx no longer exists"),
(longlong_t)fromguid);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
fromname = name;
}
redact_snap_guids = NULL;
if (nvlist_lookup_uint64_array(resume_nvl,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &redact_snap_guids,
(uint_t *)&num_redact_snaps) == 0) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, toname, sizeof (path));
char *at = strchr(path, '@');
ASSERT3P(at, !=, NULL);
*at = '\0';
if ((error = find_redact_book(hdl, path, redact_snap_guids,
num_redact_snaps, &redact_book)) != 0) {
return (error);
}
}
enum lzc_send_flags lzc_flags = lzc_flags_from_sendflags(flags) |
lzc_flags_from_resume_nvl(resume_nvl);
if (flags->verbosity != 0 || flags->progressastitle) {
/*
* Some of these may have come from the resume token; set them
* here for size-estimate purposes.
*/
sendflags_t tmpflags = *flags;
if (lzc_flags & LZC_SEND_FLAG_LARGE_BLOCK)
tmpflags.largeblock = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_COMPRESS)
tmpflags.compress = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
tmpflags.embed_data = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_RAW)
tmpflags.raw = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_SAVED)
tmpflags.saved = B_TRUE;
error = estimate_size(zhp, fromname, outfd, &tmpflags,
resumeobj, resumeoff, bytes, redact_book, errbuf, &size);
}
if (!flags->dryrun) {
progress_arg_t pa = { 0 };
pthread_t tid;
sigset_t oldmask;
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
{
pa.pa_zhp = zhp;
pa.pa_fd = outfd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
pa.pa_size = size;
pa.pa_astitle = flags->progressastitle;
pa.pa_progress = flags->progress;
error = pthread_create(&tid, NULL,
send_progress_thread, &pa);
if (error != 0) {
if (redact_book != NULL)
free(redact_book);
zfs_close(zhp);
return (error);
}
SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
error = lzc_send_resume_redacted(zhp->zfs_name, fromname, outfd,
lzc_flags, resumeobj, resumeoff, redact_book);
if (redact_book != NULL)
free(redact_book);
if (send_progress_thread_exit(hdl, tid, &oldmask)) {
zfs_close(zhp);
return (-1);
}
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
zfs_close(zhp);
switch (error) {
case 0:
return (0);
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ESRCH:
if (lzc_exists(zhp->zfs_name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source could not be found"));
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EXDEV:
case ENOENT:
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
zfs_error_aux(hdl, "%s", zfs_strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
} else {
if (redact_book != NULL)
free(redact_book);
}
zfs_close(zhp);
return (error);
}
struct zfs_send_resume_impl {
libzfs_handle_t *hdl;
sendflags_t *flags;
nvlist_t *resume_nvl;
};
static int
zfs_send_resume_impl_cb(int outfd, void *arg)
{
struct zfs_send_resume_impl *zsri = arg;
return (zfs_send_resume_impl_cb_impl(zsri->hdl, zsri->flags, outfd,
zsri->resume_nvl));
}
static int
zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
nvlist_t *resume_nvl)
{
struct zfs_send_resume_impl zsri = {
.hdl = hdl,
.flags = flags,
.resume_nvl = resume_nvl,
};
return (lzc_send_wrapper(zfs_send_resume_impl_cb, outfd, &zsri));
}
int
zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
char errbuf[ERRBUFLEN];
nvlist_t *resume_nvl;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token);
if (resume_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
ret = zfs_send_resume_impl(hdl, flags, outfd, resume_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
int
zfs_send_saved(zfs_handle_t *zhp, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *saved_nvl = NULL, *resume_nvl = NULL;
uint64_t saved_guid = 0, resume_guid = 0;
uint64_t obj = 0, off = 0, bytes = 0;
char token_buf[ZFS_MAXPROPLEN];
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"saved send failed"));
ret = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE);
if (ret != 0)
goto out;
saved_nvl = zfs_send_resume_token_to_nvlist(hdl, token_buf);
if (saved_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
/*
* If a resume token is provided we use the object and offset
* from that instead of the default, which starts from the
* beginning.
*/
if (resume_token != NULL) {
resume_nvl = zfs_send_resume_token_to_nvlist(hdl,
resume_token);
if (resume_nvl == NULL) {
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(resume_nvl, "object", &obj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &off) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid",
&resume_guid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(saved_nvl, "toguid",
&saved_guid)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset's resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (resume_guid != saved_guid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token does not match dataset"));
ret = zfs_error(hdl, EZFS_BADBACKUP, errbuf);
goto out;
}
}
(void) nvlist_remove_all(saved_nvl, "object");
fnvlist_add_uint64(saved_nvl, "object", obj);
(void) nvlist_remove_all(saved_nvl, "offset");
fnvlist_add_uint64(saved_nvl, "offset", off);
(void) nvlist_remove_all(saved_nvl, "bytes");
fnvlist_add_uint64(saved_nvl, "bytes", bytes);
(void) nvlist_remove_all(saved_nvl, "toname");
fnvlist_add_string(saved_nvl, "toname", zhp->zfs_name);
ret = zfs_send_resume_impl(hdl, flags, outfd, saved_nvl);
out:
fnvlist_free(saved_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
/*
* This function informs the target system that the recursive send is complete.
* The record is also expected in the case of a send -p.
*/
static int
send_conclusion_record(int fd, zio_cksum_t *zc)
{
dmu_replay_record_t drr;
memset(&drr, 0, sizeof (dmu_replay_record_t));
drr.drr_type = DRR_END;
if (zc != NULL)
drr.drr_u.drr_end.drr_checksum = *zc;
if (write(fd, &drr, sizeof (drr)) == -1) {
return (errno);
}
return (0);
}
/*
* This function is responsible for sending the records that contain the
* necessary information for the target system's libzfs to be able to set the
* properties of the filesystem being received, or to be able to prepare for
* a recursive receive.
*
* The "zhp" argument is the handle of the snapshot we are sending
* (the "tosnap"). The "from" argument is the short snapshot name (the part
* after the @) of the incremental source.
*/
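/*
* In outline, when this is not a dry run the prelim stream written below
* is a DRR_BEGIN record with a DMU_COMPOUNDSTREAM header type and the
* feature flags, carrying the XDR-packed header nvlist ("fromsnap",
* "tosnap", "fss", ...) as its payload when properties were gathered,
* followed by a DRR_END record carrying the running checksum.
*/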
static int
send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
boolean_t gather_props, boolean_t recursive, boolean_t verbose,
boolean_t dryrun, boolean_t raw, boolean_t replicate, boolean_t skipmissing,
boolean_t backup, boolean_t holds, boolean_t props, boolean_t doall,
nvlist_t **fssp, avl_tree_t **fsavlp)
{
int err = 0;
char *packbuf = NULL;
size_t buflen = 0;
zio_cksum_t zc = { {0} };
int featureflags = 0;
/* name of filesystem/volume that contains snapshot we are sending */
char tofs[ZFS_MAX_DATASET_NAME_LEN];
/* short name of snap we are sending */
const char *tosnap = "";
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && zfs_prop_get_int(zhp,
ZFS_PROP_VERSION) >= ZPL_VERSION_SA) {
featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
if (holds)
featureflags |= DMU_BACKUP_FEATURE_HOLDS;
(void) strlcpy(tofs, zhp->zfs_name, ZFS_MAX_DATASET_NAME_LEN);
char *at = strchr(tofs, '@');
if (at != NULL) {
*at = '\0';
tosnap = at + 1;
}
if (gather_props) {
nvlist_t *hdrnv = fnvlist_alloc();
nvlist_t *fss = NULL;
if (from != NULL)
fnvlist_add_string(hdrnv, "fromsnap", from);
fnvlist_add_string(hdrnv, "tosnap", tosnap);
if (!recursive)
fnvlist_add_boolean(hdrnv, "not_recursive");
if (raw) {
fnvlist_add_boolean(hdrnv, "raw");
}
if (gather_nvlist(zhp->zfs_hdl, tofs,
from, tosnap, recursive, raw, doall, replicate, skipmissing,
verbose, backup, holds, props, &fss, fsavlp) != 0) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
/*
* Do not allow the size of the properties list to exceed
* the limit
*/
if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
"the size of the list of snapshots and properties "
"is too large to be received successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name);
return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
errbuf));
}
fnvlist_add_nvlist(hdrnv, "fss", fss);
VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
0));
if (fssp != NULL) {
*fssp = fss;
} else {
fnvlist_free(fss);
}
fnvlist_free(hdrnv);
}
if (!dryrun) {
dmu_replay_record_t drr;
memset(&drr, 0, sizeof (dmu_replay_record_t));
/* write first begin record */
drr.drr_type = DRR_BEGIN;
drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
drr_versioninfo, DMU_COMPOUNDSTREAM);
DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
drr_versioninfo, featureflags);
if (snprintf(drr.drr_u.drr_begin.drr_toname,
sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", tofs,
tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
drr.drr_payloadlen = buflen;
err = dump_record(&drr, packbuf, buflen, &zc, fd);
free(packbuf);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", zfs_strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
err = send_conclusion_record(fd, &zc);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", zfs_strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
}
return (0);
}
/*
* Generate a send stream. The "zhp" argument is the filesystem/volume
* that contains the snapshot to send. The "fromsnap" argument is the
* short name (the part after the '@') of the snapshot that is the
* incremental source to send from (if non-NULL). The "tosnap" argument
* is the short name of the snapshot to send.
*
* The content of the send stream is the snapshot identified by
* 'tosnap'. Incremental streams are requested in two ways:
* - from the snapshot identified by "fromsnap" (if non-null) or
* - from the origin of the dataset identified by zhp, which must
* be a clone. In this case, "fromsnap" is null and "fromorigin"
* is TRUE.
*
* The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
* uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
* if "replicate" is set. If "doall" is set, dump all the intermediate
* snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
* case too. If "props" is set, send properties.
*
* Pre-wrapped (cf. lzc_send_wrapper()).
*/
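/*
* For reference, these flags roughly correspond to zfs(8) send options:
* "replicate" to -R, "doall" to -I, "props" to -p, "backup" to -b,
* "holds" to -h and "raw" to -w (the mapping is approximate, not
* exhaustive).
*/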
static int
zfs_send_cb_impl(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
char errbuf[ERRBUFLEN];
send_dump_data_t sdd = { 0 };
int err = 0;
nvlist_t *fss = NULL;
avl_tree_t *fsavl = NULL;
static uint64_t holdseq;
int spa_version;
FILE *fout;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot send '%s'"), zhp->zfs_name);
if (fromsnap && fromsnap[0] == '\0') {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"zero-length incremental source"));
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
}
if (fromsnap) {
char full_fromsnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_fromsnap_name, sizeof (full_fromsnap_name),
"%s@%s", zhp->zfs_name, fromsnap) >=
sizeof (full_fromsnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *fromsnapn = zfs_open(zhp->zfs_hdl,
full_fromsnap_name, ZFS_TYPE_SNAPSHOT);
if (fromsnapn == NULL) {
err = -1;
goto err_out;
}
zfs_close(fromsnapn);
}
if (flags->replicate || flags->doall || flags->props ||
flags->holds || flags->backup) {
char full_tosnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_tosnap_name, sizeof (full_tosnap_name),
"%s@%s", zhp->zfs_name, tosnap) >=
sizeof (full_tosnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *tosnap = zfs_open(zhp->zfs_hdl,
full_tosnap_name, ZFS_TYPE_SNAPSHOT);
if (tosnap == NULL) {
err = -1;
goto err_out;
}
err = send_prelim_records(tosnap, fromsnap, outfd,
flags->replicate || flags->props || flags->holds,
flags->replicate, flags->verbosity > 0, flags->dryrun,
flags->raw, flags->replicate, flags->skipmissing,
flags->backup, flags->holds, flags->props, flags->doall,
&fss, &fsavl);
zfs_close(tosnap);
if (err != 0)
goto err_out;
}
/* dump each stream */
sdd.fromsnap = fromsnap;
sdd.tosnap = tosnap;
sdd.outfd = outfd;
sdd.replicate = flags->replicate;
sdd.doall = flags->doall;
sdd.fromorigin = flags->fromorigin;
sdd.fss = fss;
sdd.fsavl = fsavl;
sdd.verbosity = flags->verbosity;
sdd.parsable = flags->parsable;
sdd.progress = flags->progress;
sdd.progressastitle = flags->progressastitle;
sdd.dryrun = flags->dryrun;
sdd.large_block = flags->largeblock;
sdd.embed_data = flags->embed_data;
sdd.compress = flags->compress;
sdd.raw = flags->raw;
sdd.holds = flags->holds;
sdd.filter_cb = filter_func;
sdd.filter_cb_arg = cb_arg;
if (debugnvp)
sdd.debugnv = *debugnvp;
if (sdd.verbosity != 0 && sdd.dryrun)
sdd.std_out = B_TRUE;
fout = sdd.std_out ? stdout : stderr;
/*
* Some flags require that we place user holds on the datasets that are
* being sent so they don't get destroyed during the send. We can skip
* this step if the pool is imported read-only since the datasets cannot
* be destroyed.
*/
if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
ZPOOL_PROP_READONLY, NULL) &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS &&
(flags->doall || flags->replicate)) {
++holdseq;
(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
sdd.cleanup_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (sdd.cleanup_fd < 0) {
err = errno;
goto stderr_out;
}
sdd.snapholds = fnvlist_alloc();
} else {
sdd.cleanup_fd = -1;
sdd.snapholds = NULL;
}
if (flags->verbosity != 0 || sdd.snapholds != NULL) {
/*
* Do a verbose no-op dry run to get all the verbose output
* or to gather snapshot holds before generating any data,
* then do a non-verbose real run to generate the streams.
*/
sdd.dryrun = B_TRUE;
err = dump_filesystems(zhp, &sdd);
if (err != 0)
goto stderr_out;
if (flags->verbosity != 0) {
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n",
(longlong_t)sdd.size);
} else {
char buf[16];
zfs_nicebytes(sdd.size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
}
/* Ensure no snaps found is treated as an error. */
if (!sdd.seento) {
err = ENOENT;
goto err_out;
}
/* Skip the second run if dryrun was requested. */
if (flags->dryrun)
goto err_out;
if (sdd.snapholds != NULL) {
err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds);
if (err != 0)
goto stderr_out;
fnvlist_free(sdd.snapholds);
sdd.snapholds = NULL;
}
sdd.dryrun = B_FALSE;
sdd.verbosity = 0;
}
err = dump_filesystems(zhp, &sdd);
fsavl_destroy(fsavl);
fnvlist_free(fss);
/* Ensure no snaps found is treated as an error. */
if (err == 0 && !sdd.seento)
err = ENOENT;
if (sdd.cleanup_fd != -1) {
VERIFY(0 == close(sdd.cleanup_fd));
sdd.cleanup_fd = -1;
}
if (!flags->dryrun && (flags->replicate || flags->doall ||
flags->props || flags->backup || flags->holds)) {
/*
* Write the final end record. NB: we want to do this even if
* there was some error, because the stream might not be a
* total failure.
*/
int err2 = send_conclusion_record(outfd, NULL);
if (err2 != 0)
return (zfs_standard_error(zhp->zfs_hdl, err2, errbuf));
}
return (err || sdd.err);
stderr_out:
err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
err_out:
fsavl_destroy(fsavl);
fnvlist_free(fss);
fnvlist_free(sdd.snapholds);
if (sdd.cleanup_fd != -1)
VERIFY(0 == close(sdd.cleanup_fd));
return (err);
}
struct zfs_send {
zfs_handle_t *zhp;
const char *fromsnap;
const char *tosnap;
sendflags_t *flags;
snapfilter_cb_t *filter_func;
void *cb_arg;
nvlist_t **debugnvp;
};
static int
zfs_send_cb(int outfd, void *arg)
{
struct zfs_send *zs = arg;
return (zfs_send_cb_impl(zs->zhp, zs->fromsnap, zs->tosnap, zs->flags,
outfd, zs->filter_func, zs->cb_arg, zs->debugnvp));
}
int
zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
struct zfs_send arg = {
.zhp = zhp,
.fromsnap = fromsnap,
.tosnap = tosnap,
.flags = flags,
.filter_func = filter_func,
.cb_arg = cb_arg,
.debugnvp = debugnvp,
};
return (lzc_send_wrapper(zfs_send_cb, outfd, &arg));
}
static zfs_handle_t *
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
{
char dirname[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(dirname, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(dirname, '@');
if (c != NULL)
*c = '\0';
return (zfs_open(hdl, dirname, ZFS_TYPE_DATASET));
}
/*
* Returns B_TRUE if earlier is an earlier snapshot in later's timeline;
* either an earlier snapshot in the same filesystem, or a snapshot before
* later's origin, or its origin's origin, etc.
*/
static boolean_t
snapshot_is_before(zfs_handle_t *earlier, zfs_handle_t *later)
{
boolean_t ret;
uint64_t later_txg =
(later->zfs_type == ZFS_TYPE_FILESYSTEM ||
later->zfs_type == ZFS_TYPE_VOLUME ?
UINT64_MAX : zfs_prop_get_int(later, ZFS_PROP_CREATETXG));
uint64_t earlier_txg = zfs_prop_get_int(earlier, ZFS_PROP_CREATETXG);
if (earlier_txg >= later_txg)
return (B_FALSE);
zfs_handle_t *earlier_dir = name_to_dir_handle(earlier->zfs_hdl,
earlier->zfs_name);
zfs_handle_t *later_dir = name_to_dir_handle(later->zfs_hdl,
later->zfs_name);
if (strcmp(earlier_dir->zfs_name, later_dir->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_TRUE);
}
char clonename[ZFS_MAX_DATASET_NAME_LEN];
if (zfs_prop_get(later_dir, ZFS_PROP_ORIGIN, clonename,
ZFS_MAX_DATASET_NAME_LEN, NULL, NULL, 0, B_TRUE) != 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_FALSE);
}
zfs_handle_t *origin = zfs_open(earlier->zfs_hdl, clonename,
ZFS_TYPE_DATASET);
uint64_t origin_txg = zfs_prop_get_int(origin, ZFS_PROP_CREATETXG);
/*
* If "earlier" is exactly the origin, then
* snapshot_is_before(earlier, origin) will return false (because
* they're the same).
*/
if (origin_txg == earlier_txg &&
strcmp(origin->zfs_name, earlier->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
zfs_close(origin);
return (B_TRUE);
}
zfs_close(earlier_dir);
zfs_close(later_dir);
ret = snapshot_is_before(earlier, origin);
zfs_close(origin);
return (ret);
}
/*
* The "zhp" argument is the handle of the dataset to send (typically a
* snapshot). The "from" argument is the full name of the snapshot or
* bookmark that is the incremental source.
*
* Pre-wrapped (cf. lzc_send_wrapper()).
*/
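/*
* A minimal caller sketch (dataset names and fd are illustrative):
*
*   zfs_handle_t *zhp = zfs_open(hdl, "pool/fs@b", ZFS_TYPE_SNAPSHOT);
*   sendflags_t flags = { .verbosity = 1 };
*   int err = zfs_send_one(zhp, "pool/fs@a", fileno(stdout), &flags, NULL);
*
* zfs_send_one() reaches this function through lzc_send_wrapper().
*/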
static int
zfs_send_one_cb_impl(zfs_handle_t *zhp, const char *from, int fd,
sendflags_t *flags, const char *redactbook)
{
int err;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *name = zhp->zfs_name;
pthread_t ptid;
progress_arg_t pa = { 0 };
uint64_t size = 0;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), name);
if (from != NULL && strchr(from, '@')) {
zfs_handle_t *from_zhp = zfs_open(hdl, from,
ZFS_TYPE_DATASET);
if (from_zhp == NULL)
return (-1);
if (!snapshot_is_before(from_zhp, zhp)) {
zfs_close(from_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
zfs_close(from_zhp);
}
if (redactbook != NULL) {
char bookname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *redact_snaps;
zfs_handle_t *book_zhp;
char *at, *pound;
int dsnamelen;
pound = strchr(redactbook, '#');
if (pound != NULL)
redactbook = pound + 1;
at = strchr(name, '@');
if (at == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot do a redacted send to a filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
dsnamelen = at - name;
if (snprintf(bookname, sizeof (bookname), "%.*s#%s",
dsnamelen, name, redactbook)
>= sizeof (bookname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid bookmark name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
book_zhp = zfs_open(hdl, bookname, ZFS_TYPE_BOOKMARK);
if (book_zhp == NULL)
return (-1);
if (nvlist_lookup_nvlist(book_zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
&redact_snaps) != 0 || redact_snaps == NULL) {
zfs_close(book_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a redaction bookmark"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
zfs_close(book_zhp);
}
/*
* Send fs properties
*/
if (flags->props || flags->holds || flags->backup) {
/*
* Note: the header generated by send_prelim_records()
* assumes that the incremental source is in the same
* filesystem/volume as the target (which is a requirement
* when doing "zfs send -R"). But that isn't always the
* case here (e.g. send from snap in origin, or send from
* bookmark). We pass from=NULL, which will omit this
* information from the prelim records; it isn't used
* when receiving this type of stream.
*/
err = send_prelim_records(zhp, NULL, fd, B_TRUE, B_FALSE,
flags->verbosity > 0, flags->dryrun, flags->raw,
flags->replicate, B_FALSE, flags->backup, flags->holds,
flags->props, flags->doall, NULL, NULL);
if (err != 0)
return (err);
}
/*
* Perform size estimate if verbose was specified.
*/
if (flags->verbosity != 0 || flags->progressastitle) {
err = estimate_size(zhp, from, fd, flags, 0, 0, 0, redactbook,
errbuf, &size);
if (err != 0)
return (err);
}
if (flags->dryrun)
return (0);
/*
* If progress reporting is requested, spawn a new thread to poll
* ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
sigset_t oldmask;
{
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
pa.pa_size = size;
pa.pa_astitle = flags->progressastitle;
pa.pa_progress = flags->progress;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", zfs_strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
SEND_PROGRESS_THREAD_PARENT_BLOCK(&oldmask);
}
err = lzc_send_redacted(name, from, fd,
lzc_flags_from_sendflags(flags), redactbook);
if (send_progress_thread_exit(hdl, ptid, &oldmask))
return (-1);
if (err == 0 && (flags->props || flags->holds || flags->backup)) {
/* Write the final end record. */
err = send_conclusion_record(fd, NULL);
if (err != 0)
return (zfs_standard_error(hdl, err, errbuf));
}
if (err != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
case ESRCH:
if (lzc_exists(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
from);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"target is busy; if a filesystem, "
"it must not be mounted"));
return (zfs_error(hdl, EZFS_BUSY, errbuf));
case EDQUOT:
case EFAULT:
case EFBIG:
case EINVAL:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EROFS:
zfs_error_aux(hdl, "%s", zfs_strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
case ZFS_ERR_STREAM_LARGE_MICROZAP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source snapshot contains large microzaps, "
"need -L (--large-block) or -w (--raw) to "
"generate stream"));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (err != 0);
}
struct zfs_send_one {
zfs_handle_t *zhp;
const char *from;
sendflags_t *flags;
const char *redactbook;
};
static int
zfs_send_one_cb(int fd, void *arg)
{
struct zfs_send_one *zso = arg;
return (zfs_send_one_cb_impl(zso->zhp, zso->from, fd, zso->flags,
zso->redactbook));
}
int
zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
const char *redactbook)
{
struct zfs_send_one zso = {
.zhp = zhp,
.from = from,
.flags = flags,
.redactbook = redactbook,
};
return (lzc_send_wrapper(zfs_send_one_cb, fd, &zso));
}
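/*
 * Editor's illustrative sketch (not part of this change): a minimal caller
 * of zfs_send_one() above, sending an incremental stream between two
 * snapshots of the same filesystem to an already-open file descriptor.
 * The dataset names ("tank/data@...") and the helper name are hypothetical;
 * only public libzfs interfaces already used in this file are assumed.
 */
static int
example_send_incremental(libzfs_handle_t *hdl, int out_fd)
{
	zfs_handle_t *zhp = zfs_open(hdl, "tank/data@today", ZFS_TYPE_DATASET);
	if (zhp == NULL)
		return (-1);

	sendflags_t flags = { 0 };
	flags.verbosity = 1;		/* request a size estimate */
	flags.progress = B_TRUE;	/* poll ZFS_IOC_SEND_PROGRESS */

	/* "from" is the full name of the incremental source snapshot. */
	int err = zfs_send_one(zhp, "tank/data@yesterday", out_fd, &flags,
	    NULL /* no redaction bookmark */);
	zfs_close(zhp);
	return (err);
}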
/*
* Routines specific to "zfs recv"
*/
static int
recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
boolean_t byteswap, zio_cksum_t *zc)
{
char *cp = buf;
int rv;
int len = ilen;
do {
rv = read(fd, cp, len);
cp += rv;
len -= rv;
} while (rv > 0);
if (rv < 0 || len != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to read from stream"));
return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
"cannot receive")));
}
if (zc) {
if (byteswap)
fletcher_4_incremental_byteswap(buf, ilen, zc);
else
fletcher_4_incremental_native(buf, ilen, zc);
}
return (0);
}
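/*
 * Editor's illustrative sketch (not part of this change): recv_read() above
 * loops until the requested number of bytes has been read or read() stops
 * making progress.  A standalone "read fully" helper with the same intent,
 * using plain POSIX read(2) (hypothetical name, EINTR handled explicitly):
 */
static int
example_read_fully(int fd, void *buf, size_t len)
{
	char *cp = buf;
	while (len > 0) {
		ssize_t rv = read(fd, cp, len);
		if (rv < 0 && errno == EINTR)
			continue;	/* interrupted; retry */
		if (rv <= 0)
			return (-1);	/* error or premature EOF */
		cp += rv;
		len -= rv;
	}
	return (0);
}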
static int
recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
boolean_t byteswap, zio_cksum_t *zc)
{
char *buf;
int err;
buf = zfs_alloc(hdl, len);
if (len > hdl->libzfs_max_nvlist) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
free(buf);
return (ENOMEM);
}
err = recv_read(hdl, fd, buf, len, byteswap, zc);
if (err != 0) {
free(buf);
return (err);
}
err = nvlist_unpack(buf, len, nvp, 0);
free(buf);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (malformed nvlist)"));
return (EINVAL);
}
return (0);
}
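/*
 * Editor's illustrative sketch (not part of this change): the buffer handed
 * to nvlist_unpack() by recv_read_nvlist() above is simply the packed form
 * of an nvlist produced on the sending side.  A minimal round trip using
 * the libnvpair helpers (the "fromsnap" value is a made-up example):
 */
static int
example_nvlist_roundtrip(void)
{
	nvlist_t *nvl = fnvlist_alloc();
	fnvlist_add_string(nvl, "fromsnap", "yesterday");

	size_t packed_len;
	char *packed = fnvlist_pack(nvl, &packed_len);

	/* Unpack it again, as the receive path does for stream payloads. */
	nvlist_t *copy = fnvlist_unpack(packed, packed_len);

	fnvlist_pack_free(packed, packed_len);
	fnvlist_free(copy);
	fnvlist_free(nvl);
	return (0);
}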
/*
* Returns the grand origin (origin of origin of origin...) of a given handle.
* If this dataset is not a clone, it simply returns a copy of the original
* handle.
*/
static zfs_handle_t *
recv_open_grand_origin(zfs_handle_t *zhp)
{
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
zfs_handle_t *ozhp = zfs_handle_dup(zhp);
while (ozhp != NULL) {
if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin,
sizeof (origin), &src, NULL, 0, B_FALSE) != 0)
break;
(void) zfs_close(ozhp);
ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM);
}
return (ozhp);
}
static int
recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname)
{
int err;
zfs_handle_t *ozhp = NULL;
/*
* Attempt to rename the dataset. If it fails with EACCES we have
* attempted to rename the dataset outside of its encryption root.
* Force the dataset to become an encryption root and try again.
*/
err = lzc_rename(name, newname);
if (err == EACCES) {
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = ENOENT;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = lzc_rename(name, newname);
}
out:
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
int baselen, char *newname, recvflags_t *flags)
{
static int seq;
int err;
prop_changelist_t *clp = NULL;
zfs_handle_t *zhp = NULL;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
if (clp == NULL) {
err = -1;
goto out;
}
err = changelist_prefix(clp);
if (err)
goto out;
if (tryname) {
(void) strlcpy(newname, tryname, ZFS_MAX_DATASET_NAME_LEN);
if (flags->verbose) {
(void) printf("attempting rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, tryname);
} else {
err = ENOENT;
}
if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) {
seq++;
(void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN,
"%.*srecv-%u-%u", baselen, name, getpid(), seq);
if (flags->verbose) {
(void) printf("failed - trying rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, newname);
if (err && flags->verbose) {
(void) printf("failed (%u) - "
"will try again on next pass\n", errno);
}
err = EAGAIN;
} else if (flags->verbose) {
if (err == 0)
(void) printf("success\n");
else
(void) printf("failed (%u)\n", errno);
}
(void) changelist_postfix(clp);
out:
if (clp != NULL)
changelist_free(clp);
if (zhp != NULL)
zfs_close(zhp);
return (err);
}
static int
recv_promote(libzfs_handle_t *hdl, const char *fsname,
const char *origin_fsname, recvflags_t *flags)
{
int err;
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL, *ozhp = NULL;
if (flags->verbose)
(void) printf("promoting %s\n", fsname);
(void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
/*
* Attempt to promote the dataset. If it fails with EACCES the
* promotion would cause this dataset to leave its encryption root.
* Force the origin to become an encryption root and try again.
*/
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (err == EACCES) {
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = -1;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
}
out:
if (zhp != NULL)
zfs_close(zhp);
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
char *newname, recvflags_t *flags)
{
int err = 0;
prop_changelist_t *clp;
zfs_handle_t *zhp;
boolean_t defer = B_FALSE;
int spa_version;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
zfs_type_t type = zfs_get_type(zhp);
if (type == ZFS_TYPE_SNAPSHOT &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS)
defer = B_TRUE;
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL)
return (-1);
err = changelist_prefix(clp);
if (err)
return (err);
if (flags->verbose)
(void) printf("attempting destroy %s\n", name);
if (type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, name);
err = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
err = lzc_destroy(name);
}
if (err == 0) {
if (flags->verbose)
(void) printf("success\n");
changelist_remove(clp, name);
}
(void) changelist_postfix(clp);
changelist_free(clp);
/*
* Deferred destroy might destroy the snapshot or only mark it to be
* destroyed later, and it returns success in either case.
*/
if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
ZFS_TYPE_SNAPSHOT))) {
err = recv_rename(hdl, name, NULL, baselen, newname, flags);
}
return (err);
}
typedef struct guid_to_name_data {
uint64_t guid;
boolean_t bookmark_ok;
char *name;
char *skip;
uint64_t *redact_snap_guids;
uint64_t num_redact_snaps;
} guid_to_name_data_t;
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
uint64_t *bmark_snaps;
uint_t bmark_num_snaps;
nvlist_t *nvl;
if (zhp->zfs_type != ZFS_TYPE_BOOKMARK)
return (B_FALSE);
nvl = fnvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
bmark_snaps = fnvlist_lookup_uint64_array(nvl, ZPROP_VALUE,
&bmark_num_snaps);
if (bmark_num_snaps != gtnd->num_redact_snaps)
return (B_FALSE);
int i = 0;
for (; i < bmark_num_snaps; i++) {
int j = 0;
for (; j < bmark_num_snaps; j++) {
if (bmark_snaps[i] == gtnd->redact_snap_guids[j])
break;
}
if (j == bmark_num_snaps)
break;
}
return (i == bmark_num_snaps);
}
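/*
 * Editor's illustrative sketch (not part of this change): the nested loop
 * in redact_snaps_match() above is an order-insensitive comparison of two
 * equal-length guid arrays (duplicates are not expected in either array).
 * The same check in isolation, over hypothetical plain arrays:
 */
static boolean_t
example_guid_sets_equal(const uint64_t *a, const uint64_t *b, uint_t n)
{
	for (uint_t i = 0; i < n; i++) {
		uint_t j;
		for (j = 0; j < n; j++) {
			if (a[i] == b[j])
				break;	/* a[i] found somewhere in b */
		}
		if (j == n)
			return (B_FALSE);	/* a[i] missing from b */
	}
	return (B_TRUE);
}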
static int
guid_to_name_cb(zfs_handle_t *zhp, void *arg)
{
guid_to_name_data_t *gtnd = arg;
const char *slash;
int err;
if (gtnd->skip != NULL &&
(slash = strrchr(zhp->zfs_name, '/')) != NULL &&
strcmp(slash + 1, gtnd->skip) == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid &&
(gtnd->num_redact_snaps == -1 || redact_snaps_match(zhp, gtnd))) {
(void) strcpy(gtnd->name, zhp->zfs_name);
zfs_close(zhp);
return (EEXIST);
}
err = zfs_iter_children_v2(zhp, 0, guid_to_name_cb, gtnd);
if (err != EEXIST && gtnd->bookmark_ok)
err = zfs_iter_bookmarks_v2(zhp, 0, guid_to_name_cb, gtnd);
zfs_close(zhp);
return (err);
}
/*
* Attempt to find the local dataset associated with this guid. In the case of
* multiple matches, we attempt to find the "best" match by searching
* progressively larger portions of the hierarchy. This allows one to send a
* tree of datasets individually and guarantee that we will find the source
* guid within that hierarchy, even if there are multiple matches elsewhere.
*
* If num_redact_snaps is not -1, we attempt to find a redaction bookmark with
* the specified number of redaction snapshots. If num_redact_snaps isn't 0 or
* -1, then redact_snap_guids will be an array of the guids of the snapshots the
* redaction bookmark was created with. If num_redact_snaps is -1, then we will
* attempt to find a snapshot or bookmark (if bookmark_ok is passed) with the
* given guid. Note that a redaction bookmark can be returned if
* num_redact_snaps == -1.
*/
static int
guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name)
{
char pname[ZFS_MAX_DATASET_NAME_LEN];
guid_to_name_data_t gtnd;
gtnd.guid = guid;
gtnd.bookmark_ok = bookmark_ok;
gtnd.name = name;
gtnd.skip = NULL;
gtnd.redact_snap_guids = redact_snap_guids;
gtnd.num_redact_snaps = num_redact_snaps;
/*
* Search progressively larger portions of the hierarchy, starting
* with the filesystem specified by 'parent'. This will
* select the "most local" version of the origin snapshot in the case
* that there are multiple matching snapshots in the system.
*/
(void) strlcpy(pname, parent, sizeof (pname));
char *cp = strrchr(pname, '@');
if (cp == NULL)
cp = strchr(pname, '\0');
for (; cp != NULL; cp = strrchr(pname, '/')) {
/* Chop off the last component and open the parent */
*cp = '\0';
zfs_handle_t *zhp = make_dataset_handle(hdl, pname);
if (zhp == NULL)
continue;
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
if (err != EEXIST)
err = zfs_iter_children_v2(zhp, 0, guid_to_name_cb,
&gtnd);
if (err != EEXIST && bookmark_ok)
err = zfs_iter_bookmarks_v2(zhp, 0, guid_to_name_cb,
&gtnd);
zfs_close(zhp);
if (err == EEXIST)
return (0);
/*
* Remember the last portion of the dataset so we skip it next
* time through (as we've already searched that portion of the
* hierarchy).
*/
gtnd.skip = strrchr(pname, '/') + 1;
}
return (ENOENT);
}
static int
guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
boolean_t bookmark_ok, char *name)
{
return (guid_to_name_redact_snaps(hdl, parent, guid, bookmark_ok, NULL,
-1, name));
}
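/*
 * Editor's illustrative sketch (not part of this change): the loop in
 * guid_to_name_redact_snaps() widens its search by repeatedly chopping the
 * last component off the starting name, e.g. "tank/a/b@snap" is searched as
 * "tank/a/b", then "tank/a", then "tank".  Just the name-walking part, with
 * a hypothetical helper name:
 */
static void
example_walk_search_scopes(const char *start)
{
	char pname[ZFS_MAX_DATASET_NAME_LEN];
	(void) strlcpy(pname, start, sizeof (pname));

	char *cp = strrchr(pname, '@');
	if (cp == NULL)
		cp = strchr(pname, '\0');
	for (; cp != NULL; cp = strrchr(pname, '/')) {
		*cp = '\0';
		(void) printf("would search under '%s'\n", pname);
	}
}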
/*
* Return -1 if guid1 was created before guid2, 0 if they were created in
* the same txg, and +1 if guid1 was created after guid2. (If guid2 is 0
* this returns 0, and if guid1 is 0 it returns +1.)
*/
static int
created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
uint64_t guid1, uint64_t guid2)
{
nvlist_t *nvfs;
const char *fsname = NULL, *snapname = NULL;
char buf[ZFS_MAX_DATASET_NAME_LEN];
int rv;
zfs_handle_t *guid1hdl, *guid2hdl;
uint64_t create1, create2;
if (guid2 == 0)
return (0);
if (guid1 == 0)
return (1);
nvfs = fsavl_find(avl, guid1, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid1hdl == NULL)
return (-1);
nvfs = fsavl_find(avl, guid2, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid2hdl == NULL) {
zfs_close(guid1hdl);
return (-1);
}
create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
if (create1 < create2)
rv = -1;
else if (create1 > create2)
rv = +1;
else
rv = 0;
zfs_close(guid1hdl);
zfs_close(guid2hdl);
return (rv);
}
/*
* This function reestablishes the hierarchy of encryption roots after a
* recursive incremental receive has completed. This must be done after the
* second call to recv_incremental_replication() has renamed and promoted all
* sent datasets to their final locations in the dataset hierarchy.
*/
static int
recv_fix_encryption_hierarchy(libzfs_handle_t *hdl, const char *top_zfs,
- nvlist_t *stream_nv)
+ nvlist_t *stream_nv, avl_tree_t *stream_avl)
{
int err;
nvpair_t *fselem = NULL;
- nvlist_t *stream_fss;
+ nvlist_t *local_nv;
+ avl_tree_t *local_avl;
+ boolean_t recursive;
+
+ recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
+ ENOENT);
- stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
+ /* Using top_zfs, gather the nvlists for all local filesystems. */
+ if ((err = gather_nvlist(hdl, top_zfs, NULL, NULL,
+ recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
+ B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
+ return (err);
- while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) {
+ /*
+ * Go through the nvlists of the local filesystems and check for
+ * encryption roots.
+ */
+ while ((fselem = nvlist_next_nvpair(local_nv, fselem)) != NULL) {
zfs_handle_t *zhp = NULL;
uint64_t crypt;
- nvlist_t *snaps, *props, *stream_nvfs = NULL;
- nvpair_t *snapel = NULL;
+ nvlist_t *stream_props, *snaps, *stream_nvfs = NULL,
+ *nvfs = NULL;
boolean_t is_encroot, is_clone, stream_encroot;
- char *cp;
- const char *stream_keylocation = NULL;
+ const char *stream_keylocation = NULL, *fsname;
char keylocation[MAXNAMELEN];
- char fsname[ZFS_MAX_DATASET_NAME_LEN];
-
- keylocation[0] = '\0';
- stream_nvfs = fnvpair_value_nvlist(fselem);
- snaps = fnvlist_lookup_nvlist(stream_nvfs, "snaps");
- props = fnvlist_lookup_nvlist(stream_nvfs, "props");
- stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
-
- /* find a snapshot from the stream that exists locally */
- err = ENOENT;
- while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) {
- uint64_t guid;
-
- guid = fnvpair_value_uint64(snapel);
- err = guid_to_name(hdl, top_zfs, guid, B_FALSE,
- fsname);
- if (err == 0)
- break;
- }
-
- if (err != 0)
- continue;
-
- cp = strchr(fsname, '@');
- if (cp != NULL)
- *cp = '\0';
+ nvpair_t *snapelem;
+ nvfs = fnvpair_value_nvlist(fselem);
+ snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
+ fsname = fnvlist_lookup_string(nvfs, "name");
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = ENOENT;
goto error;
}
- crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
- is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
- (void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
-
/* we don't need to do anything for unencrypted datasets */
+ crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF) {
zfs_close(zhp);
continue;
}
+ is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
+ (void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
+ keylocation[0] = '\0';
+
+ /*
+ * Go through the snapshots of the local filesystem and find
+ * the corresponding filesystem in the stream.
+ */
+ for (snapelem = nvlist_next_nvpair(snaps, NULL);
+ snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
+ uint64_t thisguid;
+
+ thisguid = fnvpair_value_uint64(snapelem);
+ stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
+
+ if (stream_nvfs != NULL)
+ break;
+ }
+
+ if (stream_nvfs == NULL)
+ continue;
+
+ stream_props = fnvlist_lookup_nvlist(stream_nvfs, "props");
+ stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
+
/*
* If the dataset is flagged as an encryption root, was not
* received as a clone and is not currently an encryption root,
* force it to become one. Fix up the keylocation if necessary.
*/
if (stream_encroot) {
if (!is_clone && !is_encroot) {
err = lzc_change_key(fsname,
DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
- stream_keylocation = fnvlist_lookup_string(props,
+ stream_keylocation = fnvlist_lookup_string(stream_props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
/*
* Refresh the properties in case the call to
* lzc_change_key() changed the value.
*/
zfs_refresh_properties(zhp);
err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
keylocation, sizeof (keylocation), NULL, NULL,
0, B_TRUE);
if (err != 0) {
zfs_close(zhp);
goto error;
}
if (strcmp(keylocation, stream_keylocation) != 0) {
err = zfs_prop_set(zhp,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
stream_keylocation);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
}
/*
* If the dataset is not flagged as an encryption root and is
* currently an encryption root, force it to inherit from its
* parent. The root of a raw send should never be
* force-inherited.
*/
if (!stream_encroot && is_encroot &&
strcmp(top_zfs, fsname) != 0) {
err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT,
NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
zfs_close(zhp);
}
return (0);
error:
return (err);
}
static int
recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
nvlist_t *renamed)
{
nvlist_t *local_nv, *deleted = NULL;
avl_tree_t *local_avl;
nvpair_t *fselem, *nextfselem;
const char *fromsnap;
char newname[ZFS_MAX_DATASET_NAME_LEN];
char guidname[32];
int error;
boolean_t needagain, progress, recursive;
const char *s1, *s2;
+ if (flags->dryrun)
+ return (0);
+
fromsnap = fnvlist_lookup_string(stream_nv, "fromsnap");
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
- if (flags->dryrun)
- return (0);
-
again:
needagain = progress = B_FALSE;
deleted = fnvlist_alloc();
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
return (error);
/*
* Process deletes and renames
*/
for (fselem = nvlist_next_nvpair(local_nv, NULL);
fselem; fselem = nextfselem) {
nvlist_t *nvfs, *snaps;
nvlist_t *stream_nvfs = NULL;
nvpair_t *snapelem, *nextsnapelem;
uint64_t fromguid = 0;
uint64_t originguid = 0;
uint64_t stream_originguid = 0;
uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
const char *fsname, *stream_fsname;
nextfselem = nvlist_next_nvpair(local_nv, fselem);
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
fsname = fnvlist_lookup_string(nvfs, "name");
parent_fromsnap_guid = fnvlist_lookup_uint64(nvfs,
"parentfromsnap");
(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
/*
* First find the stream's fs, so we can check for
* a different origin (due to "zfs promote")
*/
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
uint64_t thisguid;
thisguid = fnvpair_value_uint64(snapelem);
stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
if (stream_nvfs != NULL)
break;
}
/* check for promote */
(void) nvlist_lookup_uint64(stream_nvfs, "origin",
&stream_originguid);
if (stream_nvfs && originguid != stream_originguid) {
switch (created_before(hdl, local_avl,
stream_originguid, originguid)) {
case 1: {
/* promote it! */
nvlist_t *origin_nvfs;
const char *origin_fsname;
origin_nvfs = fsavl_find(local_avl, originguid,
NULL);
origin_fsname = fnvlist_lookup_string(
origin_nvfs, "name");
error = recv_promote(hdl, fsname, origin_fsname,
flags);
if (error == 0)
progress = B_TRUE;
break;
}
default:
break;
case -1:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
return (-1);
}
/*
* We had/have the wrong origin, therefore our
* list of snapshots is wrong. Need to handle
* them on the next pass.
*/
needagain = B_TRUE;
continue;
}
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nextsnapelem) {
uint64_t thisguid;
const char *stream_snapname;
nvlist_t *found, *props;
nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
thisguid = fnvpair_value_uint64(snapelem);
found = fsavl_find(stream_avl, thisguid,
&stream_snapname);
/* check for delete */
if (found == NULL) {
char name[ZFS_MAX_DATASET_NAME_LEN];
if (!flags->force)
continue;
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
error = recv_destroy(hdl, name,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)thisguid);
nvlist_add_boolean(deleted, guidname);
continue;
}
stream_nvfs = found;
if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
&props) && 0 == nvlist_lookup_nvlist(props,
stream_snapname, &props)) {
zfs_cmd_t zc = {"\0"};
zc.zc_cookie = B_TRUE; /* received */
(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
"%s@%s", fsname, nvpair_name(snapelem));
zcmd_write_src_nvlist(hdl, &zc, props);
(void) zfs_ioctl(hdl,
ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
/* check for different snapname */
if (strcmp(nvpair_name(snapelem),
stream_snapname) != 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
char tryname[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
(void) snprintf(tryname, sizeof (name), "%s@%s",
fsname, stream_snapname);
error = recv_rename(hdl, name, tryname,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
if (strcmp(stream_snapname, fromsnap) == 0)
fromguid = thisguid;
}
/* check for delete */
if (stream_nvfs == NULL) {
if (!flags->force)
continue;
error = recv_destroy(hdl, fsname, strlen(tofs)+1,
newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
nvlist_add_boolean(deleted, guidname);
continue;
}
if (fromguid == 0) {
if (flags->verbose) {
(void) printf("local fs %s does not have "
"fromsnap (%s in stream); must have "
"been deleted locally; ignoring\n",
fsname, fromsnap);
}
continue;
}
stream_fsname = fnvlist_lookup_string(stream_nvfs, "name");
stream_parent_fromsnap_guid = fnvlist_lookup_uint64(
stream_nvfs, "parentfromsnap");
s1 = strrchr(fsname, '/');
s2 = strrchr(stream_fsname, '/');
/*
* Check if we're going to rename based on parent guid change
* and the current parent guid was also deleted. If it was then
* rename will fail and is likely unneeded, so avoid this and
* force an early retry to determine the new
* parent_fromsnap_guid.
*/
if (stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
if (nvlist_exists(deleted, guidname)) {
progress = B_TRUE;
needagain = B_TRUE;
goto doagain;
}
}
/*
* Check for rename. If the exact receive path is specified, it
* does not count as a rename, but we still need to check the
* datasets beneath it.
*/
if ((stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
((flags->isprefix || strcmp(tofs, fsname) != 0) &&
(s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
nvlist_t *parent;
char tryname[ZFS_MAX_DATASET_NAME_LEN];
parent = fsavl_find(local_avl,
stream_parent_fromsnap_guid, NULL);
/*
* NB: parent might not be found if we used the
* tosnap for stream_parent_fromsnap_guid,
* because the parent is a newly-created fs;
* we'll be able to rename it after we recv the
* new fs.
*/
if (parent != NULL) {
const char *pname;
pname = fnvlist_lookup_string(parent, "name");
(void) snprintf(tryname, sizeof (tryname),
"%s%s", pname, strrchr(stream_fsname, '/'));
} else {
tryname[0] = '\0';
if (flags->verbose) {
(void) printf("local fs %s new parent "
"not found\n", fsname);
}
}
newname[0] = '\0';
error = recv_rename(hdl, fsname, tryname,
strlen(tofs)+1, newname, flags);
if (renamed != NULL && newname[0] != '\0') {
fnvlist_add_boolean(renamed, newname);
}
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
}
doagain:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
fnvlist_free(deleted);
if (needagain && progress) {
/* do another pass to fix up temporary names */
if (flags->verbose)
(void) printf("another pass:\n");
goto again;
}
return (needagain || error != 0);
}
static int
zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
char **top_zfs, nvlist_t *cmdprops)
{
nvlist_t *stream_nv = NULL;
avl_tree_t *stream_avl = NULL;
const char *fromsnap = NULL;
const char *sendsnap = NULL;
char *cp;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
char sendfs[ZFS_MAX_DATASET_NAME_LEN];
char errbuf[ERRBUFLEN];
dmu_replay_record_t drre;
int error;
boolean_t anyerr = B_FALSE;
boolean_t softerr = B_FALSE;
boolean_t recursive, raw;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
assert(drr->drr_type == DRR_BEGIN);
assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
/*
* Read in the nvlist from the stream.
*/
if (drr->drr_payloadlen != 0) {
error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
&stream_nv, flags->byteswap, zc);
if (error) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
}
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0);
if (recursive && strchr(destname, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot stream"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
/*
* Read in the end record and verify checksum.
*/
if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
flags->byteswap, NULL)))
goto out;
if (flags->byteswap) {
drre.drr_type = BSWAP_32(drre.drr_type);
drre.drr_u.drr_end.drr_checksum.zc_word[0] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
drre.drr_u.drr_end.drr_checksum.zc_word[1] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
drre.drr_u.drr_end.drr_checksum.zc_word[2] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
drre.drr_u.drr_end.drr_checksum.zc_word[3] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
}
if (drre.drr_type != DRR_END) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incorrect header checksum"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
if (drr->drr_payloadlen != 0) {
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"couldn't allocate avl tree"));
error = zfs_error(hdl, EZFS_NOMEM, errbuf);
goto out;
}
if (fromsnap != NULL && recursive) {
nvlist_t *renamed = NULL;
nvpair_t *pair = NULL;
(void) strlcpy(tofs, destname, sizeof (tofs));
if (flags->isprefix) {
struct drr_begin *drrb = &drr->drr_u.drr_begin;
int i;
if (flags->istail) {
cp = strrchr(drrb->drr_toname, '/');
if (cp == NULL) {
(void) strlcat(tofs, "/",
sizeof (tofs));
i = 0;
} else {
i = (cp - drrb->drr_toname);
}
} else {
i = strcspn(drrb->drr_toname, "/@");
}
/* zfs_receive_one() will create_parents() */
(void) strlcat(tofs, &drrb->drr_toname[i],
sizeof (tofs));
*strchr(tofs, '@') = '\0';
}
if (!flags->dryrun && !flags->nomount) {
renamed = fnvlist_alloc();
}
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, renamed);
/* Unmount renamed filesystems before receiving. */
while ((pair = nvlist_next_nvpair(renamed,
pair)) != NULL) {
zfs_handle_t *zhp;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, nvpair_name(pair),
ZFS_TYPE_FILESYSTEM);
if (zhp != NULL) {
clp = changelist_gather(zhp,
ZFS_PROP_MOUNTPOINT, 0,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp != NULL) {
softerr |=
changelist_prefix(clp);
changelist_free(clp);
}
}
}
fnvlist_free(renamed);
}
}
/*
* Get the fs specified by the first path in the stream (the top level
* specified by 'zfs send') and pass it to each invocation of
* zfs_receive_one().
*/
(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
sizeof (sendfs));
if ((cp = strchr(sendfs, '@')) != NULL) {
*cp = '\0';
/*
* Find the "sendsnap", the final snapshot in a replication
* stream. zfs_receive_one() handles certain errors
* differently, depending on whether the contained stream is the
* last one or not.
*/
sendsnap = (cp + 1);
}
/* Finally, receive each contained stream */
do {
/*
* we should figure out if it has a recoverable
* error, in which case do a recv_skip() and drive on.
* Note, if we fail due to already having this guid,
* zfs_receive_one() will take care of it (i.e.,
* recv_skip() and return 0).
*/
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
sendfs, stream_nv, stream_avl, top_zfs, sendsnap, cmdprops);
if (error == ENODATA) {
error = 0;
break;
}
anyerr |= error;
} while (error == 0);
if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) {
/*
* Now that we have the fs's they sent us, try the
* renames again.
*/
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, NULL);
}
- if (raw && softerr == 0 && *top_zfs != NULL) {
+ if (raw && *top_zfs != NULL && !flags->dryrun) {
softerr = recv_fix_encryption_hierarchy(hdl, *top_zfs,
- stream_nv);
+ stream_nv, stream_avl);
}
out:
fsavl_destroy(stream_avl);
fnvlist_free(stream_nv);
if (softerr)
error = -2;
if (anyerr)
error = -1;
return (error);
}
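/*
 * Editor's illustrative sketch (not part of this change):
 * zfs_receive_package() is only entered for compound ("zfs send -R"-style)
 * streams, as its asserts show.  Roughly, a caller can classify a
 * native-endian DRR_BEGIN record like this; a byteswapped stream would need
 * the BSWAP_*() treatment first:
 */
static boolean_t
example_is_compound_stream(const dmu_replay_record_t *drr)
{
	if (drr->drr_type != DRR_BEGIN ||
	    drr->drr_u.drr_begin.drr_magic != DMU_BACKUP_MAGIC)
		return (B_FALSE);
	return (DMU_GET_STREAM_HDRTYPE(
	    drr->drr_u.drr_begin.drr_versioninfo) == DMU_COMPOUNDSTREAM);
}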
static void
trunc_prop_errs(int truncated)
{
ASSERT(truncated != 0);
if (truncated == 1)
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"1 more property could not be set\n"));
else
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"%d more properties could not be set\n"), truncated);
}
static int
recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
{
dmu_replay_record_t *drr;
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
uint64_t payload_size;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* XXX would be great to use lseek if possible... */
drr = buf;
while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
byteswap, NULL) == 0) {
if (byteswap)
drr->drr_type = BSWAP_32(drr->drr_type);
switch (drr->drr_type) {
case DRR_BEGIN:
if (drr->drr_payloadlen != 0) {
(void) recv_read(hdl, fd, buf,
drr->drr_payloadlen, B_FALSE, NULL);
}
break;
case DRR_END:
free(buf);
return (0);
case DRR_OBJECT:
if (byteswap) {
drr->drr_u.drr_object.drr_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_bonuslen);
drr->drr_u.drr_object.drr_raw_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_raw_bonuslen);
}
payload_size =
DRR_OBJECT_PAYLOAD_SIZE(&drr->drr_u.drr_object);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE:
if (byteswap) {
drr->drr_u.drr_write.drr_logical_size =
BSWAP_64(
drr->drr_u.drr_write.drr_logical_size);
drr->drr_u.drr_write.drr_compressed_size =
BSWAP_64(
drr->drr_u.drr_write.drr_compressed_size);
}
payload_size =
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
assert(payload_size <= SPA_MAXBLOCKSIZE);
(void) recv_read(hdl, fd, buf,
payload_size, B_FALSE, NULL);
break;
case DRR_SPILL:
if (byteswap) {
drr->drr_u.drr_spill.drr_length =
BSWAP_64(drr->drr_u.drr_spill.drr_length);
drr->drr_u.drr_spill.drr_compressed_size =
BSWAP_64(drr->drr_u.drr_spill.
drr_compressed_size);
}
payload_size =
DRR_SPILL_PAYLOAD_SIZE(&drr->drr_u.drr_spill);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE_EMBEDDED:
if (byteswap) {
drr->drr_u.drr_write_embedded.drr_psize =
BSWAP_32(drr->drr_u.drr_write_embedded.
drr_psize);
}
(void) recv_read(hdl, fd, buf,
P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize,
8), B_FALSE, NULL);
break;
case DRR_OBJECT_RANGE:
case DRR_WRITE_BYREF:
case DRR_FREEOBJECTS:
case DRR_FREE:
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type"));
free(buf);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
}
free(buf);
return (-1);
}
static void
recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap,
boolean_t resumable, boolean_t checksum)
{
char target_fs[ZFS_MAX_DATASET_NAME_LEN];
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, (checksum ?
"checksum mismatch" : "incomplete stream")));
if (!resumable)
return;
(void) strlcpy(target_fs, target_snap, sizeof (target_fs));
*strchr(target_fs, '@') = '\0';
zfs_handle_t *zhp = zfs_open(hdl, target_fs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return;
char token_buf[ZFS_MAXPROPLEN];
int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf),
NULL, NULL, 0, B_TRUE);
if (error == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"checksum mismatch or incomplete stream.\n"
"Partially received snapshot is saved.\n"
"A resuming stream can be generated on the sending "
"system by running:\n"
" zfs send -t %s"),
token_buf);
}
zfs_close(zhp);
}
/*
* Prepare a new nvlist of properties that are to override (-o) or be excluded
* (-x) from the received dataset.
* recvprops: received properties from the send stream
* cmdprops: raw input properties from command line
* origprops: properties, both locally-set and received, currently set on the
* target dataset if it exists, NULL otherwise.
* oxprops: valid output override (-o) and excluded (-x) properties
*/
static int
zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type,
char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs,
boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops,
nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out,
uint_t *wkeylen_out, const char *errbuf)
{
nvpair_t *nvp;
nvlist_t *oprops, *voprops;
zfs_handle_t *zhp = NULL;
zpool_handle_t *zpool_hdl = NULL;
char *cp;
int ret = 0;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
if (nvlist_empty(cmdprops))
return (0); /* No properties to override or exclude */
*oxprops = fnvlist_alloc();
oprops = fnvlist_alloc();
strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN);
/*
* Get our dataset handle. The target dataset may not exist yet.
*/
if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) {
zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET);
if (zhp == NULL) {
ret = -1;
goto error;
}
}
/* open the zpool handle */
cp = strchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
zpool_hdl = zpool_open(hdl, namebuf);
if (zpool_hdl == NULL) {
ret = -1;
goto error;
}
/* restore namebuf to match fsname for later use */
if (cp != NULL)
*cp = '/';
/*
* first iteration: process excluded (-x) properties now and gather
* added (-o) properties to be later processed by zfs_valid_proplist()
*/
nvp = NULL;
while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
/*
* It turns out, if we don't normalize "aliased" names
* e.g. compress= against the "real" names (e.g. compression)
* here, then setting/excluding them does not work as
* intended.
*
* But since user-defined properties wouldn't have a valid
* mapping here, we do this conditional dance.
*/
const char *newname = name;
if (prop >= ZFS_PROP_TYPE)
newname = zfs_prop_to_name(prop);
/* "origin" is processed separately, don't handle it here */
if (prop == ZFS_PROP_ORIGIN)
continue;
/* raw streams can't override encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set or excluded for raw streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* For plain replicated send, we can ignore encryption
* properties other than first stream
*/
if ((zfs_prop_encryption_key_param(prop) || prop ==
ZFS_PROP_ENCRYPTION) && !newfs && recursive && !raw) {
continue;
}
/* incremental streams can only exclude encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && !newfs &&
nvpair_type(nvp) != DATA_TYPE_BOOLEAN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set for incremental streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
switch (nvpair_type(nvp)) {
case DATA_TYPE_BOOLEAN: /* -x property */
/*
* DATA_TYPE_BOOLEAN is the way we're asked to "exclude"
* a property: this is done by forcing an explicit
* inherit on the destination so the effective value is
* not the one we received from the send stream.
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"Warning: %s: property '%s' does not "
"apply to datasets of this type\n"),
fsname, name);
continue;
}
/*
* We do this only if the property is not already
* locally-set, in which case its value will take
* priority over the received anyway.
*/
if (nvlist_exists(origprops, newname)) {
nvlist_t *attrs;
const char *source = NULL;
attrs = fnvlist_lookup_nvlist(origprops,
newname);
if (nvlist_lookup_string(attrs,
ZPROP_SOURCE, &source) == 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
}
/*
* We can't force an explicit inherit on non-inheritable
* properties: if we're asked to exclude such a value, we remove it
* from the "recvprops" input nvlist instead.
*/
if (!zfs_prop_user(name) && /* can be inherited too */
!zfs_prop_inheritable(prop) &&
nvlist_exists(recvprops, newname))
fnvlist_remove(recvprops, newname);
else
fnvlist_add_boolean(*oxprops, newname);
break;
case DATA_TYPE_STRING: /* -o property=value */
/*
* we're trying to override a property that does not
* make sense for this type of dataset, but we don't
* want to fail if the receive is recursive: this comes
* in handy when the send stream contains, for
* instance, a child ZVOL and we're trying to receive
* it with "-o atime=on"
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
if (recursive)
continue;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' does not apply to datasets "
"of this type"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
fnvlist_add_string(oprops, newname,
fnvpair_value_string(nvp));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be a string or boolean"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
if (toplevel) {
/* convert override strings properties to native */
if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET,
oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) {
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* zfs_crypto_create() requires the parent name. Get it
* by truncating the fsname copy stored in namebuf.
*/
cp = strrchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
if (!raw && !(!newfs && recursive) &&
zfs_crypto_create(hdl, namebuf, voprops, NULL,
B_FALSE, wkeydata_out, wkeylen_out) != 0) {
fnvlist_free(voprops);
ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto error;
}
/* second pass: process "-o" properties */
fnvlist_merge(*oxprops, voprops);
fnvlist_free(voprops);
} else {
/* override props on child dataset are inherited */
nvp = NULL;
while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
fnvlist_add_boolean(*oxprops, name);
}
}
error:
if (zhp != NULL)
zfs_close(zhp);
if (zpool_hdl != NULL)
zpool_close(zpool_hdl);
fnvlist_free(oprops);
return (ret);
}
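/*
 * Editor's illustrative sketch (not part of this change): the cmdprops
 * nvlist consumed by zfs_setup_cmdline_props() encodes "-o property=value"
 * as a string pair and "-x property" as a boolean pair, matching the
 * DATA_TYPE_STRING/DATA_TYPE_BOOLEAN cases above.  The property names here
 * are only examples:
 */
static nvlist_t *
example_build_cmdprops(void)
{
	nvlist_t *cmdprops = fnvlist_alloc();

	/* zfs receive -o compression=on ... */
	fnvlist_add_string(cmdprops, "compression", "on");

	/* zfs receive -x mountpoint ... */
	fnvlist_add_boolean(cmdprops, "mountpoint");

	return (cmdprops);
}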
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
*/
static int
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
struct timespec begin_time;
int ioctl_err, ioctl_errno, err;
char *cp;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
char errbuf[ERRBUFLEN];
const char *chopprefix;
boolean_t newfs = B_FALSE;
boolean_t stream_wantsnewfs, stream_resumingnewfs;
boolean_t newprops = B_FALSE;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
uint64_t parent_snapguid = 0;
prop_changelist_t *clp = NULL;
nvlist_t *snapprops_nvlist = NULL;
nvlist_t *snapholds_nvlist = NULL;
zprop_errflags_t prop_errflags;
nvlist_t *prop_errors = NULL;
boolean_t recursive;
const char *snapname = NULL;
char destsnap[MAXPATHLEN * 2];
char origin[MAXNAMELEN] = {0};
char name[MAXPATHLEN];
char tmp_keylocation[MAXNAMELEN] = {0};
nvlist_t *rcvprops = NULL; /* props received from the send stream */
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
nvlist_t *origprops = NULL; /* original props (if destination exists) */
zfs_type_t type = ZFS_TYPE_INVALID;
boolean_t toplevel = B_FALSE;
boolean_t zoned = B_FALSE;
boolean_t hastoken = B_FALSE;
boolean_t redacted;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif
clock_gettime(CLOCK_MONOTONIC_RAW, &begin_time);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
/* Did the user request holds be skipped via zfs recv -k? */
boolean_t holds = flags->holds && !flags->skipholds;
if (stream_avl != NULL) {
const char *keylocation = NULL;
nvlist_t *lookup = NULL;
nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
&snapname);
(void) nvlist_lookup_uint64(fs, "parentfromsnap",
&parent_snapguid);
err = nvlist_lookup_nvlist(fs, "props", &rcvprops);
if (err) {
rcvprops = fnvlist_alloc();
newprops = B_TRUE;
}
/*
* The keylocation property may only be set on encryption roots,
* but this dataset might not become an encryption root until
* recv_fix_encryption_hierarchy() is called. That function
* will fix up the keylocation anyway, so we temporarily unset
* the keylocation for now to avoid any errors from the receive
* ioctl.
*/
err = nvlist_lookup_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (err == 0) {
strlcpy(tmp_keylocation, keylocation, MAXNAMELEN);
(void) nvlist_remove_all(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
}
if (flags->canmountoff) {
fnvlist_add_uint64(rcvprops,
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0);
} else if (newprops) { /* nothing in rcvprops, eliminate it */
fnvlist_free(rcvprops);
rcvprops = NULL;
newprops = B_FALSE;
}
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
snapprops_nvlist = fnvlist_lookup_nvlist(lookup,
snapname);
}
if (holds) {
if (0 == nvlist_lookup_nvlist(fs, "snapholds",
&lookup)) {
snapholds_nvlist = fnvlist_lookup_nvlist(
lookup, snapname);
}
}
}
cp = NULL;
/*
* Determine how much of the snapshot name stored in the stream
* we are going to tack on to the name they specified on the
* command line, and how much we are going to chop off.
*
* If they specified a snapshot, chop the entire name stored in
* the stream.
*/
if (flags->istail) {
/*
* A filesystem was specified with -e. We want to tack on only
* the tail of the sent snapshot path.
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -e"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strrchr(sendfs, '/');
if (chopprefix == NULL) {
/*
* The tail is the poolname, so we need to
* prepend a path separator.
*/
int len = strlen(drrb->drr_toname);
cp = umem_alloc(len + 2, UMEM_NOFAIL);
cp[0] = '/';
(void) strcpy(&cp[1], drrb->drr_toname);
chopprefix = cp;
} else {
chopprefix = drrb->drr_toname + (chopprefix - sendfs);
}
} else if (flags->isprefix) {
/*
* A filesystem was specified with -d. We want to tack on
* everything but the first element of the sent snapshot path
* (all but the pool name).
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -d"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strchr(drrb->drr_toname, '/');
if (chopprefix == NULL)
chopprefix = strchr(drrb->drr_toname, '@');
} else if (strchr(tosnap, '@') == NULL) {
/*
* If a filesystem was specified without -d or -e, we want to
* tack on everything after the fs specified by 'zfs send'.
*/
chopprefix = drrb->drr_toname + strlen(sendfs);
} else {
/* A snapshot was specified as an exact path (no -d or -e). */
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot "
"stream"));
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
}
ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL);
ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) ||
strchr(sendfs, '/') == NULL);
ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
chopprefix[0] == '\0');
/*
* Determine name of destination snapshot.
*/
(void) strlcpy(destsnap, tosnap, sizeof (destsnap));
(void) strlcat(destsnap, chopprefix, sizeof (destsnap));
if (cp != NULL)
umem_free(cp, strlen(cp) + 1);
if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) {
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
/*
* Determine the name of the origin snapshot.
*/
if (originsnap) {
(void) strlcpy(origin, originsnap, sizeof (origin));
if (flags->verbose)
(void) printf("using provided clone origin %s\n",
origin);
} else if (drrb->drr_flags & DRR_FLAG_CLONE) {
if (guid_to_name(hdl, destsnap,
drrb->drr_fromguid, B_FALSE, origin) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local origin for clone %s does not exist"),
destsnap);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
if (flags->verbose)
(void) printf("found clone origin %s\n", origin);
}
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_DEDUP)) {
(void) fprintf(stderr,
gettext("ERROR: \"zfs receive\" no longer supports "
"deduplicated send streams. Use\n"
"the \"zstream redup\" command to convert this stream "
"to a regular,\n"
"non-deduplicated stream.\n"));
err = zfs_error(hdl, EZFS_NOTSUP, errbuf);
goto out;
}
boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RESUMING;
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW;
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA;
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;
stream_resumingnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && resuming;
if (stream_wantsnewfs) {
/*
* if the parent fs does not exist, look for it based on
* the parent snap GUID
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive new filesystem stream"));
(void) strlcpy(name, destsnap, sizeof (name));
cp = strrchr(name, '/');
if (cp)
*cp = '\0';
if (cp &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char suffix[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(suffix, strrchr(destsnap, '/'),
sizeof (suffix));
if (guid_to_name(hdl, name, parent_snapguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strlcat(destsnap, suffix,
sizeof (destsnap));
}
}
} else {
/*
* If the fs does not exist, look for it based on the
* fromsnap GUID.
*/
if (resuming) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive resume stream"));
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive incremental stream"));
}
(void) strlcpy(name, destsnap, sizeof (name));
*strchr(name, '@') = '\0';
/*
* If the exact receive path was specified and this is the
* topmost path in the stream, then if the fs does not exist we
* should look no further.
*/
if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char snap[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(snap, strchr(destsnap, '@'),
sizeof (snap));
if (guid_to_name(hdl, name, drrb->drr_fromguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strlcat(destsnap, snap,
sizeof (destsnap));
}
}
}
(void) strlcpy(name, destsnap, sizeof (name));
*strchr(name, '@') = '\0';
redacted = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_REDACTED;
if (flags->heal) {
if (flags->isprefix || flags->istail || flags->force ||
flags->canmountoff || flags->resumable || flags->nomount ||
flags->skipholds) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective recv can not be used when combined with"
" this flag"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
uint64_t guid =
get_snap_guid(hdl, name, strchr(destsnap, '@') + 1);
if (guid == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective recv must specify an existing snapshot"
" to heal"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
} else if (guid != drrb->drr_toguid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local snapshot doesn't match the snapshot"
" in the provided stream"));
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
} else if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL;
boolean_t encrypted;
(void) strcpy(zc.zc_name, name);
/*
* Destination fs exists. It must be one of these cases:
* - an incremental send stream
* - the stream specifies a new fs (full stream or clone)
* and they want us to blow away the existing fs (and
* have therefore specified -F and removed any snapshots)
* - we are resuming a failed receive.
*/
if (stream_wantsnewfs) {
boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL;
if (!flags->force) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' exists\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (zfs_ioctl(hdl, ZFS_IOC_SNAPSHOT_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has snapshots (eg. %s)\n"
"must destroy them to overwrite it"),
zc.zc_name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume && strrchr(name, '/') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s is the root dataset\n"
"cannot overwrite with a ZVOL"),
name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume &&
zfs_ioctl(hdl, ZFS_IOC_DATASET_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has children (eg. %s)\n"
"cannot overwrite with a ZVOL"),
zc.zc_name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
}
if ((zhp = zfs_open(hdl, name,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
err = -1;
goto out;
}
/*
* When receiving a full/newfs stream onto an existing dataset, the
* "-F" flag is required. That is enforced for the initial receive by
* the earlier checks in this function. Similarly, resuming a
* full/newfs recv onto an existing dataset requires "-F".
*
* When the dataset doesn't exist, the full/newfs recv is done on a
* newly created dataset, which is marked INCONSISTENT. But when
* receiving onto an existing dataset, the recv is first done on
* %recv, and it is %recv that is marked INCONSISTENT; the existing
* dataset is not.
* A resume of a full/newfs receive where the dataset is not
* INCONSISTENT therefore indicates that we are resuming a newfs onto
* an existing dataset, so enforce "-F" in that case as well.
*/
if (stream_resumingnewfs &&
!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
!flags->force) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Resuming recv on existing destination '%s'\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_RESUME_EXISTS, errbuf);
goto out;
}
if (stream_wantsnewfs &&
zhp->zfs_dmustats.dds_origin[0]) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' is a clone\n"
"must destroy it to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
/*
* Raw sends cannot be performed as an incremental on top
* of existing unencrypted datasets. zfs recv -F can't be
* used to blow away an existing encrypted filesystem. This
* is because it would require the dsl dir to point to the
* new key (or lack of a key) and the old key at the same
* time. The -F flag may still be used for deleting
* intermediate snapshots that would otherwise prevent the
* receive from working.
*/
encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) !=
ZIO_CRYPT_OFF;
if (!stream_wantsnewfs && !encrypted && raw) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot perform raw receive on top of "
"existing unencrypted dataset"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (stream_wantsnewfs && flags->force &&
((raw && !encrypted) || encrypted)) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive -F cannot be used to destroy an "
"encrypted filesystem or overwrite an "
"unencrypted one with an encrypted one"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
(stream_wantsnewfs || stream_resumingnewfs)) {
/* We can't do online recv in this case */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->forceunmount ? MS_FORCE : 0);
if (clp == NULL) {
zfs_close(zhp);
err = -1;
goto out;
}
if (changelist_prefix(clp) != 0) {
changelist_free(clp);
zfs_close(zhp);
err = -1;
goto out;
}
}
/*
* If we are resuming a newfs, set newfs here so that we will
* mount it if the recv succeeds this time. We can tell
* that it was a newfs on the first recv because the fs
* itself will be inconsistent (if the fs existed when we
* did the first recv, we would have received it into
* .../%recv).
*/
if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT))
newfs = B_TRUE;
/* we want to know if we're zoned when validating -o|-x props */
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
/* may need this info later, get it now we have zhp around */
if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
NULL, NULL, 0, B_TRUE) == 0)
hastoken = B_TRUE;
/* gather existing properties on destination */
origprops = fnvlist_alloc();
fnvlist_merge(origprops, zhp->zfs_props);
fnvlist_merge(origprops, zhp->zfs_user_props);
zfs_close(zhp);
} else {
zfs_handle_t *zhp;
/*
* Destination filesystem does not exist. Therefore we better
* be creating a new filesystem (either from a full backup, or
* a clone). It would therefore be invalid if the user
* specified only the pool name (i.e. if the destination name
* contained no slash character).
*/
cp = strrchr(name, '/');
if (!stream_wantsnewfs || cp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' does not exist"), name);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
/*
* Trim off the final dataset component so we perform the
* recvbackup ioctl to the filesystem's parent.
*/
*cp = '\0';
if (flags->isprefix && !flags->istail && !flags->dryrun &&
create_parents(hdl, destsnap, strlen(tosnap)) != 0) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
/* validate parent */
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
zfs_close(zhp);
goto out;
}
zfs_close(zhp);
newfs = B_TRUE;
*cp = '/';
}
if (flags->verbose) {
(void) printf("%s %s%s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
flags->heal ? "corrective " : "",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);
}
/*
* If this is the top-level dataset, record it so we can use it
* for recursive operations later.
*/
if (top_zfs != NULL &&
(*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) {
toplevel = B_TRUE;
if (*top_zfs == NULL)
*top_zfs = zfs_strdup(hdl, name);
}
if (drrb->drr_type == DMU_OST_ZVOL) {
type = ZFS_TYPE_VOLUME;
} else if (drrb->drr_type == DMU_OST_ZFS) {
type = ZFS_TYPE_FILESYSTEM;
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type: 0x%d"), drrb->drr_type);
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive,
stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops,
&oxprops, &wkeydata, &wkeylen, errbuf)) != 0)
goto out;
/*
* When sending with properties (zfs send -p), the encryption property
* is not included because it is a SETONCE property and therefore
* treated as read only. However, we are always able to determine its
* value because raw sends will include it in the DRR_BEGIN payload
* and non-raw sends with properties are not allowed for encrypted
* datasets. Therefore, if this is a non-raw properties stream, we can
* infer that the value should be ZIO_CRYPT_OFF and manually add that
* to the received properties.
*/
if (stream_wantsnewfs && !raw && rcvprops != NULL &&
!nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) {
if (oxprops == NULL)
oxprops = fnvlist_alloc();
fnvlist_add_uint64(oxprops,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), ZIO_CRYPT_OFF);
}
if (flags->dryrun) {
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
/*
* We have read the DRR_BEGIN record, but we have
* not yet read the payload. For a non-dryrun receive
* this will be done by the kernel, so we must
* emulate that here, before attempting to read
* more records.
*/
err = recv_read(hdl, infd, buf, drr->drr_payloadlen,
flags->byteswap, NULL);
free(buf);
if (err != 0)
goto out;
err = recv_skip(hdl, infd, flags->byteswap);
goto out;
}
if (flags->heal) {
err = ioctl_err = lzc_receive_with_heal(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force,
flags->heal, flags->resumable, raw, infd, drr_noswap, -1,
&read_bytes, &errflags, NULL, &prop_errors);
} else {
err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force,
flags->resumable, raw, infd, drr_noswap, -1, &read_bytes,
&errflags, NULL, &prop_errors);
}
ioctl_errno = ioctl_err;
prop_errflags = errflags;
if (err == 0) {
nvpair_t *prop_err = NULL;
while ((prop_err = nvlist_next_nvpair(prop_errors,
prop_err)) != NULL) {
char tbuf[1024];
zfs_prop_t prop;
int intval;
prop = zfs_name_to_prop(nvpair_name(prop_err));
(void) nvpair_value_int32(prop_err, &intval);
if (strcmp(nvpair_name(prop_err),
ZPROP_N_MORE_ERRORS) == 0) {
trunc_prop_errs(intval);
break;
} else if (snapname == NULL || finalsnap == NULL ||
strcmp(finalsnap, snapname) == 0 ||
strcmp(nvpair_name(prop_err),
zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) {
/*
* Skip errors on, for example, "refquota" for
* intermediate snapshots leading up to a final
* one; that is what the checks above are for.
*
* See zfs_ioctl.c's extract_delay_props() for
* a list of props which can fail on
* intermediate snapshots, but shouldn't
* affect the overall receive.
*/
(void) snprintf(tbuf, sizeof (tbuf),
dgettext(TEXT_DOMAIN,
"cannot receive %s property on %s"),
nvpair_name(prop_err), name);
zfs_setprop_error(hdl, prop, intval, tbuf);
}
}
}
if (err == 0 && snapprops_nvlist) {
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, destsnap, sizeof (zc.zc_name));
zc.zc_cookie = B_TRUE; /* received */
zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist);
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
if (err == 0 && snapholds_nvlist) {
nvpair_t *pair;
nvlist_t *holds, *errors = NULL;
int cleanup_fd = -1;
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
fnvlist_add_string(holds, destsnap, nvpair_name(pair));
}
(void) lzc_hold(holds, cleanup_fd, &errors);
fnvlist_free(snapholds_nvlist);
fnvlist_free(holds);
}
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
/*
* It may be that this snapshot already exists,
* in which case we want to consume & ignore it
* rather than failing.
*/
avl_tree_t *local_avl;
nvlist_t *local_nv, *fs;
cp = strchr(destsnap, '@');
/*
* XXX Do this faster by just iterating over snaps in
* this fs. Also if zc_value does not exist, we will
* get a strange "does not exist" error message.
*/
*cp = '\0';
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE,
B_TRUE, &local_nv, &local_avl) == 0) {
*cp = '@';
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
if (fs != NULL) {
if (flags->verbose) {
(void) printf("snap %s already exists; "
"ignoring\n", destsnap);
}
err = ioctl_err = recv_skip(hdl, infd,
flags->byteswap);
}
}
*cp = '@';
}
if (ioctl_err != 0) {
switch (ioctl_errno) {
case ENODEV:
cp = strchr(destsnap, '@');
*cp = '\0';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"most recent snapshot of %s does not\n"
"match incremental source"), destsnap);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
*cp = '@';
break;
case ETXTBSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s has been modified\n"
"since most recent snapshot"), name);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
break;
case EACCES:
if (flags->heal) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"key must be loaded to do a non-raw "
"corrective recv on an encrypted "
"dataset."));
} else if (raw && stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create encryption key"));
} else if (raw && !stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key does not match "
"existing key"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"inherited key must be loaded"));
}
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
break;
case EEXIST:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination already exists"));
(void) zfs_error_fmt(hdl, EZFS_EXISTS,
dgettext(TEXT_DOMAIN, "cannot restore to %s"),
destsnap);
*cp = '@';
break;
case EINVAL:
if (embedded && !raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incompatible embedded data stream "
"feature with encrypted receive."));
} else if (flags->resumable) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"kernel modules must be upgraded to "
"receive this stream."));
}
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ECKSUM:
case ZFS_ERR_STREAM_TRUNCATED:
if (flags->heal)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective receive was not able to "
"reconstruct the data needed for "
"healing."));
else
recv_ecksum_set_aux(hdl, destsnap,
flags->resumable, ioctl_err == ECKSUM);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental send stream requires -L "
"(--large-block), to match previous receive."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ENOTSUP:
if (flags->heal)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream is not compatible with the "
"data in the pool."));
else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to receive this "
"stream."));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ZFS_ERR_CRYPTO_NOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream uses crypto parameters not compatible with "
"this pool"));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EDQUOT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s space quota exceeded."), name);
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid missing. See errata %u at "
"https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-ER."),
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid mismatch. See the 'zfs receive' "
"man page section\n discussing the limitations "
"of raw encrypted send streams."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_SPILL_BLOCK_FLAG_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Spill block flag missing for raw send.\n"
"The zfs software on the sending system must "
"be updated."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_RESUME_EXISTS:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Resuming recv on existing dataset without force"));
(void) zfs_error_fmt(hdl, EZFS_RESUME_EXISTS,
dgettext(TEXT_DOMAIN, "cannot resume recv %s"),
destsnap);
*cp = '@';
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive required kernel memory allocation "
"larger than the system can support. Please file "
"an issue at the OpenZFS issue tracker:\n"
"https://github.com/openzfs/zfs/issues/new"));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EBUSY:
if (hastoken) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s contains "
"partially-complete state from "
"\"zfs receive -s\"."), name);
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
}
zfs_fallthrough;
default:
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
}
}
/*
* Mount the target filesystem (if created). Also mount any
* children of the target filesystem if we did a replication
* receive (indicated by stream_avl being non-NULL).
*/
if (clp) {
if (!flags->nomount)
err |= changelist_postfix(clp);
changelist_free(clp);
}
if ((newfs || stream_avl) && type == ZFS_TYPE_FILESYSTEM && !redacted)
flags->domount = B_TRUE;
if (prop_errflags & ZPROP_ERR_NOCLEAR) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to clear unreceived properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (prop_errflags & ZPROP_ERR_NORESTORE) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to restore original properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (err || ioctl_err) {
err = -1;
goto out;
}
if (flags->verbose) {
char buf1[64];
char buf2[64];
uint64_t bytes = read_bytes;
struct timespec delta;
clock_gettime(CLOCK_MONOTONIC_RAW, &delta);
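/*
 * Compute the elapsed time as (now - begin_time) by hand, borrowing
 * a second when the nanosecond difference would go negative; a zero
 * delta is clamped to 1 ns below so the throughput division cannot
 * divide by zero.
 */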
if (begin_time.tv_nsec > delta.tv_nsec) {
delta.tv_nsec =
1000000000 + delta.tv_nsec - begin_time.tv_nsec;
delta.tv_sec -= 1;
} else
delta.tv_nsec -= begin_time.tv_nsec;
delta.tv_sec -= begin_time.tv_sec;
if (delta.tv_sec == 0 && delta.tv_nsec == 0)
delta.tv_nsec = 1;
double delta_f = delta.tv_sec + (delta.tv_nsec / 1e9);
zfs_nicebytes(bytes, buf1, sizeof (buf1));
zfs_nicebytes(bytes / delta_f, buf2, sizeof (buf2));
(void) printf("received %s stream in %.2f seconds (%s/sec)\n",
buf1, delta_f, buf2);
}
err = 0;
out:
if (prop_errors != NULL)
fnvlist_free(prop_errors);
if (tmp_keylocation[0] != '\0') {
fnvlist_add_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation);
}
if (newprops)
fnvlist_free(rcvprops);
fnvlist_free(oxprops);
fnvlist_free(origprops);
return (err);
}
/*
* Check properties we were asked to override (both -o|-x)
*/
static boolean_t
zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props,
const char *errbuf)
{
nvpair_t *nvp = NULL;
zfs_prop_t prop;
const char *name;
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
name = nvpair_name(nvp);
prop = zfs_name_to_prop(name);
if (prop == ZPROP_USERPROP) {
if (!zfs_prop_user(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s: invalid property '%s'"), errbuf, name);
return (B_FALSE);
}
continue;
}
/*
* "origin" is readonly but is used to receive datasets as
* clones so we don't raise an error here
*/
if (prop == ZFS_PROP_ORIGIN)
continue;
/* encryption params have their own verification later */
if (prop == ZFS_PROP_ENCRYPTION ||
zfs_prop_encryption_key_param(prop))
continue;
/*
* cannot override readonly, set-once, and certain other
* settable properties
*/
if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION ||
prop == ZFS_PROP_VOLSIZE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s: invalid property '%s'"), errbuf, name);
return (B_FALSE);
}
}
return (B_TRUE);
}
static int
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
int err;
dmu_replay_record_t drr, drr_noswap;
struct drr_begin *drrb = &drr.drr_u.drr_begin;
char errbuf[ERRBUFLEN];
zio_cksum_t zcksum = { { 0 } };
uint64_t featureflags;
int hdrtype;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* check cmdline props, raise an error if they cannot be received */
if (!zfs_receive_checkprops(hdl, cmdprops, errbuf))
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
if (flags->isprefix &&
!zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
"(%s) does not exist"), tosnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
if (originsnap &&
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
"(%s) does not exist"), originsnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* read in the BEGIN record */
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
&zcksum)))
return (err);
if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
/* It's the double end record at the end of a package */
return (ENODATA);
}
/* the kernel needs the non-byteswapped begin record */
drr_noswap = drr;
flags->byteswap = B_FALSE;
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
/*
* We computed the checksum in the wrong byteorder in
* recv_read() above; do it again correctly.
*/
memset(&zcksum, 0, sizeof (zio_cksum_t));
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
flags->byteswap = B_TRUE;
drr.drr_type = BSWAP_32(drr.drr_type);
drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
drrb->drr_type = BSWAP_32(drrb->drr_type);
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
}
if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad magic number)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
if (!DMU_STREAM_SUPPORTED(featureflags) ||
(hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
/*
* Let's be explicit about this one, since rather than
* being a new feature we can't know about, it's an old
* feature we dropped.
*/
if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has deprecated feature: dedup, try "
"'zstream redup [send in a file] | zfs recv "
"[...]'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has unsupported feature, feature flags = "
"%llx (unknown flags = %llx)"),
(u_longlong_t)featureflags,
(u_longlong_t)((featureflags) &
~DMU_BACKUP_FEATURE_MASK));
}
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
/* Holds feature is set once in the compound stream header. */
if (featureflags & DMU_BACKUP_FEATURE_HOLDS)
flags->holds = B_TRUE;
if (strchr(drrb->drr_toname, '@') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad snapshot name)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN];
if (sendfs == NULL) {
/*
* We were not called from zfs_receive_package(). Get
* the fs specified by 'zfs send'.
*/
char *cp;
(void) strlcpy(nonpackage_sendfs,
drr.drr_u.drr_begin.drr_toname,
sizeof (nonpackage_sendfs));
if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
*cp = '\0';
sendfs = nonpackage_sendfs;
VERIFY(finalsnap == NULL);
}
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
finalsnap, cmdprops));
} else {
assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
&zcksum, top_zfs, cmdprops));
}
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
* Return 0 on total success, -2 if some things couldn't be
* destroyed/renamed/promoted, -1 if some things couldn't be received.
* (-1 overrides -2; if -1 is returned and the resumable flag was
* specified, the transfer can be resumed if the sending side supports it).
*/
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
char *top_zfs = NULL;
int err;
struct stat sb;
const char *originsnap = NULL;
/*
* The only way fstat can fail is if we do not have a valid file
* descriptor.
*/
if (fstat(infd, &sb) == -1) {
perror("fstat");
return (-2);
}
if (props) {
err = nvlist_lookup_string(props, "origin", &originsnap);
if (err && err != ENOENT)
return (err);
}
err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
stream_avl, &top_zfs, NULL, props);
if (err == 0 && !flags->nomount && flags->domount && top_zfs) {
zfs_handle_t *zhp = NULL;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, top_zfs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
err = -1;
goto out;
} else {
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_MOUNT_ALWAYS,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL) {
err = -1;
goto out;
}
/* mount and share received datasets */
err = changelist_postfix(clp);
changelist_free(clp);
if (err != 0)
err = -1;
}
}
out:
if (top_zfs)
free(top_zfs);
return (err);
}
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index dd0b3d848fe9..9d83357fcc6d 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -1,2872 +1,2872 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" Copyright (c) 2023, 2024 Klara, Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" Copyright (c) 2024, Klara, Inc.
.\"
.Dd November 1, 2024
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the dbuf cache.
The target size is the smaller of this value and
.No 1/2^ Ns Sy dbuf_cache_shift Pq 1/32nd
of the target ARC size.
The behavior of the dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
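.Pp
As a rough sketch of the relationship described above (names as used in this
page, not exact implementation variables):
.Bd -literal -compact
dbuf cache target = MIN(dbuf_cache_max_bytes,
                        target ARC size / 2^dbuf_cache_shift)
.Ed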
.
.It Sy dbuf_metadata_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the metadata dbuf cache.
The target size is the smaller of this value and
.No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
of the target ARC size.
The behavior of the metadata dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_cache_hiwater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage over
.Sy dbuf_cache_max_bytes
when dbufs must be evicted directly.
.
.It Sy dbuf_cache_lowater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage below
.Sy dbuf_cache_max_bytes
when the evict thread stops evicting dbufs.
.
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq uint
Set the size of the dbuf cache
.Pq Sy dbuf_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq uint
Set the size of the dbuf metadata cache
.Pq Sy dbuf_metadata_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_mutex_cache_shift Ns = Ns Sy 0 Pq uint
Set the size of the mutex array for the dbuf cache.
When set to
.Sy 0
the array is dynamically sized based on total system memory.
.
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq uint
Dnode slots allocated in a single operation, as a power of 2.
The default value minimizes lock contention for the bulk operation performed.
.
.It Sy dmu_ddt_copies Ns = Ns Sy 3 Pq uint
Controls the number of copies stored for DeDup Table
.Pq DDT
objects.
Reducing the number of copies to 1 from the previous default of 3
can reduce the write inflation caused by deduplication.
This assumes redundancy for this data is provided by the vdev layer.
If the DDT is damaged, space may be leaked
.Pq not freed
when the DDT cannot report the correct reference count.
.
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Limit the amount we can prefetch with one call to this amount in bytes.
This helps to limit the amount of memory that can be used by prefetching.
.
.It Sy ignore_hole_birth Pq int
Alias for
.Sy send_holes_without_birth_time .
.
.It Sy l2arc_feed_again Ns = Ns Sy 1 Ns | Ns 0 Pq int
Turbo L2ARC warm-up.
When the L2ARC is cold the fill interval will be set as fast as possible.
.
.It Sy l2arc_feed_min_ms Ns = Ns Sy 200 Pq u64
Min feed interval in milliseconds.
Requires
.Sy l2arc_feed_again Ns = Ns Ar 1
and only applicable in related situations.
.
.It Sy l2arc_feed_secs Ns = Ns Sy 1 Pq u64
Seconds between L2ARC writing.
.
.It Sy l2arc_headroom Ns = Ns Sy 8 Pq u64
How far through the ARC lists to search for L2ARC cacheable content,
expressed as a multiplier of
.Sy l2arc_write_max .
ARC persistence across reboots can be achieved with persistent L2ARC
by setting this parameter to
.Sy 0 ,
allowing the full length of ARC lists to be searched for cacheable content.
.
.It Sy l2arc_headroom_boost Ns = Ns Sy 200 Ns % Pq u64
Scales
.Sy l2arc_headroom
by this percentage when L2ARC contents are being successfully compressed
before writing.
A value of
.Sy 100
disables this feature.
.
.It Sy l2arc_exclude_special Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether buffers present on special vdevs are eligible for caching
into L2ARC.
If set to 1, exclude dbufs on special vdevs from being cached to L2ARC.
.
.It Sy l2arc_mfuonly Ns = Ns Sy 0 Ns | Ns 1 Ns | Ns 2 Pq int
Controls whether only MFU metadata and data are cached from ARC into L2ARC.
This may be desired to avoid wasting space on L2ARC when reading/writing large
amounts of data that are not expected to be accessed more than once.
.Pp
The default is 0,
meaning both MRU and MFU data and metadata are cached.
When turning off this feature (setting it to 0), some MRU buffers will
still be present in ARC and eventually cached on L2ARC.
.No If Sy l2arc_noprefetch Ns = Ns Sy 0 ,
some prefetched buffers will be cached to L2ARC, and those might later
transition to MRU, in which case the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
Setting it to 1 means to L2 cache only MFU data and metadata.
.Pp
Setting it to 2 means to L2 cache all metadata (MRU+MFU) but
only MFU data (i.e. MRU data are not cached).
This can be the right setting to cache as much metadata as possible,
even with a high data turnover.
.Pp
Regardless of
.Sy l2arc_noprefetch ,
some MFU buffers might be evicted from ARC,
accessed later on as prefetches and transition to MRU as prefetches.
If accessed again they are counted as MRU and the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
The ARC status of L2ARC buffers when they were first cached in
L2ARC can be seen in the
.Sy l2arc_mru_asize , Sy l2arc_mfu_asize , No and Sy l2arc_prefetch_asize
arcstats when importing the pool or onlining a cache
device if persistent L2ARC is enabled.
.Pp
The
.Sy evict_l2_eligible_mru
arcstat does not take into account if this option is enabled as the information
provided by the
.Sy evict_l2_eligible_m[rf]u
arcstats can be used to decide if toggling this option is appropriate
for the current workload.
.
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq uint
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure,
too many headers on a system with an irrationally large L2ARC
can render it slow or unusable.
This parameter limits L2ARC writes and rebuilds to achieve the target.
.
.It Sy l2arc_trim_ahead Ns = Ns Sy 0 Ns % Pq u64
Trims ahead of the current write size
.Pq Sy l2arc_write_max
on L2ARC devices by this percentage of write size if we have filled the device.
If set to
.Sy 100
we TRIM twice the space required to accommodate upcoming writes.
A minimum of
.Sy 64 MiB
will be trimmed.
It also enables TRIM of the whole L2ARC device upon creation
or addition to an existing pool or if the header of the device is
invalid upon importing a pool or onlining a cache device.
A value of
.Sy 0
disables TRIM on L2ARC altogether and is the default as it can put significant
stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
.
.It Sy l2arc_noprefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Do not write buffers to L2ARC if they were prefetched but not used by
applications.
In case there are prefetched buffers in L2ARC and this option
is later set, we do not read the prefetched buffers from L2ARC.
Unsetting this option is useful for caching sequential reads from the
disks to L2ARC and serving those reads from L2ARC later on.
This may be beneficial in case the L2ARC device is significantly faster
in sequential reads than the disks of the pool.
.Pp
Use
.Sy 1
to disable and
.Sy 0
to enable caching/reading prefetches to/from L2ARC.
.
.It Sy l2arc_norw Ns = Ns Sy 0 Ns | Ns 1 Pq int
No reads during writes.
.
.It Sy l2arc_write_boost Ns = Ns Sy 33554432 Ns B Po 32 MiB Pc Pq u64
Cold L2ARC devices will have
.Sy l2arc_write_max
increased by this amount while they remain cold.
.
.It Sy l2arc_write_max Ns = Ns Sy 33554432 Ns B Po 32 MiB Pc Pq u64
Max write bytes per interval.
.
.It Sy l2arc_rebuild_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Rebuild the L2ARC when importing a pool (persistent L2ARC).
This can be disabled if there are problems importing a pool
or attaching an L2ARC device (e.g. the L2ARC device is slow
in reading stored log metadata, or the metadata
has become somehow fragmented/unusable).
.
.It Sy l2arc_rebuild_blocks_min_l2size Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Minimum size of an L2ARC device required in order to write log blocks in it.
The log blocks are used upon importing the pool to rebuild the persistent L2ARC.
.Pp
For L2ARC devices less than 1 GiB, the amount of data
.Fn l2arc_evict
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Metaslab granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data to each disk
before moving on to the next top-level vdev.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq u64
Make some blocks above a certain size be gang blocks.
This option is used by the test suite to facilitate testing.
.
.It Sy metaslab_force_ganging_pct Ns = Ns Sy 3 Ns % Pq uint
For blocks that could be forced to be a gang block (due to
.Sy metaslab_force_ganging ) ,
force this many of them to be gang blocks.
.
.It Sy brt_zap_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Controls prefetching BRT records for blocks which are going to be cloned.
.
.It Sy brt_zap_default_bs Ns = Ns Sy 12 Po 4 KiB Pc Pq int
Default BRT ZAP data block size as a power of 2. Note that changing this after
creating a BRT on the pool will not affect existing BRTs, only newly created
ones.
.
.It Sy brt_zap_default_ibs Ns = Ns Sy 12 Po 4 KiB Pc Pq int
Default BRT ZAP indirect block size as a power of 2. Note that changing this
after creating a BRT on the pool will not affect existing BRTs, only newly
created ones.
.
.It Sy ddt_zap_default_bs Ns = Ns Sy 15 Po 32 KiB Pc Pq int
Default DDT ZAP data block size as a power of 2. Note that changing this after
creating a DDT on the pool will not affect existing DDTs, only newly created
ones.
.
.It Sy ddt_zap_default_ibs Ns = Ns Sy 15 Po 32 KiB Pc Pq int
Default DDT ZAP indirect block size as a power of 2. Note that changing this
after creating a DDT on the pool will not affect existing DDTs, only newly
created ones.
.
.It Sy zfs_default_bs Ns = Ns Sy 9 Po 512 B Pc Pq int
Default dnode block size as a power of 2.
.
.It Sy zfs_default_ibs Ns = Ns Sy 17 Po 128 KiB Pc Pq int
Default dnode indirect block size as a power of 2.
.
.It Sy zfs_dio_enabled Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable Direct I/O.
If this setting is 0, then all I/O requests will be directed through the ARC
acting as though the dataset property
.Sy direct
was set to
.Sy disabled .
.
.It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
When attempting to log an output nvlist of an ioctl in the on-disk history,
the output will not be stored if it is larger than this size (in bytes).
This must be less than
.Sy DMU_MAX_ACCESS Pq 64 MiB .
This applies primarily to
.Fn zfs_ioc_channel_program Pq cf. Xr zfs-program 8 .
.
.It Sy zfs_keep_log_spacemaps_at_export Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent log spacemaps from being destroyed during pool exports and destroys.
.
.It Sy zfs_metaslab_segment_weight_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable segment-based metaslab selection.
.
.It Sy zfs_metaslab_switch_threshold Ns = Ns Sy 2 Pq int
When using segment-based metaslab selection, continue allocating
from the active metaslab until this option's
worth of buckets have been exhausted.
.
.It Sy metaslab_debug_load Ns = Ns Sy 0 Ns | Ns 1 Pq int
Load all metaslabs during pool import.
.
.It Sy metaslab_debug_unload Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent metaslabs from being unloaded.
.
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable use of the fragmentation metric in computing metaslab weights.
.
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
Maximum distance to search forward from the last offset.
Without this limit, fragmented pools can see
.Em >100`000
iterations and
.Fn metaslab_block_picker
becomes the performance limiting factor on high-performance storage.
.Pp
With the default setting of
.Sy 16 MiB ,
we typically see less than
.Em 500
iterations, even with very fragmented
.Sy ashift Ns = Ns Sy 9
pools.
The maximum number of iterations possible is
.Sy metaslab_df_max_search / 2^(ashift+1) .
With the default setting of
.Sy 16 MiB
this is
.Em 16*1024 Pq with Sy ashift Ns = Ns Sy 9
or
.Em 2*1024 Pq with Sy ashift Ns = Ns Sy 12 .
.
.It Sy metaslab_df_use_largest_segment Ns = Ns Sy 0 Ns | Ns 1 Pq int
If not searching forward (due to
.Sy metaslab_df_max_search , metaslab_df_free_pct ,
.No or Sy metaslab_df_alloc_threshold ) ,
this tunable controls which segment is used.
If set, we will use the largest free segment.
If unset, we will use a segment of at least the requested size.
.
.It Sy zfs_metaslab_max_size_cache_sec Ns = Ns Sy 3600 Ns s Po 1 hour Pc Pq u64
When we unload a metaslab, we cache the size of the largest free chunk.
We use that cached size to determine whether or not to load a metaslab
for a given allocation.
As more frees accumulate in that metaslab while it's unloaded,
the cached max size becomes less and less accurate.
After a number of seconds controlled by this tunable,
we stop considering the cached max size and start
considering only the histogram instead.
.
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq uint
When we are loading a new metaslab, we check the amount of memory being used
to store metaslab range trees.
If it is over a threshold, we attempt to unload the least recently used metaslab
to prevent the system from clogging all of its memory with range trees.
This tunable sets the percentage of total system memory that is the threshold.
.
.It Sy zfs_metaslab_try_hard_before_gang Ns = Ns Sy 0 Ns | Ns 1 Pq int
.Bl -item -compact
.It
If unset, we will first try normal allocation.
.It
If that fails then we will do a gang allocation.
.It
If that fails then we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.Pp
.Bl -item -compact
.It
If set, we will first try normal allocation.
.It
If that fails then we will do a "try hard" allocation.
.It
If that fails we will do a gang allocation.
.It
If that fails we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq uint
When not trying hard, we only consider this number of the best metaslabs.
This improves performance, especially when there are many metaslabs per vdev
and the allocation can't actually be satisfied
(so we would otherwise iterate all metaslabs).
.
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq uint
When a vdev is added, target this number of metaslabs per top-level vdev.
.
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint
Default lower limit for metaslab size.
.
.It Sy zfs_vdev_max_ms_shift Ns = Ns Sy 34 Po 16 GiB Pc Pq uint
Default upper limit for metaslab size.
.
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq uint
Maximum ashift used when optimizing for logical \[->] physical sector size on
new
top-level vdevs.
May be increased up to
.Sy ASHIFT_MAX Po 16 Pc ,
but this may negatively impact pool space efficiency.
.
.It Sy zfs_vdev_direct_write_verify Ns = Ns Sy Linux 1 | FreeBSD 0 Pq uint
If non-zero, then a Direct I/O write's checksum will be verified every
time the write is issued and before it is committed to the block pointer.
In the event the checksum is not valid, the I/O operation will return EIO.
This module parameter can be used to detect if the
contents of the user's buffer have changed in the process of doing a Direct I/O
write.
It can also help to identify if reported checksum errors are tied to Direct I/O
writes.
Each verify error causes a
.Sy dio_verify_wr
zevent.
Direct Write I/O checksum verify errors can be seen with
.Nm zpool Cm status Fl d .
The default value for this is 1 on Linux, but is 0 for
.Fx
because user pages can be placed under write protection in
.Fx
before the Direct I/O write is issued.
.
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq uint
Minimum ashift used when creating new top-level vdevs.
.
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq uint
Minimum number of metaslabs to create in a top-level vdev.
.
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
Skip label validation steps during pool import.
Changing is not recommended unless you know what you're doing
and are recovering a damaged label.
.
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq uint
Practical upper limit of total metaslabs per top-level vdev.
.
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group preloading.
.
.It Sy metaslab_preload_limit Ns = Ns Sy 10 Pq uint
Maximum number of metaslabs per group to preload.
.
.It Sy metaslab_preload_pct Ns = Ns Sy 50 Pq uint
Percentage of CPUs to run a metaslab preload taskq.
.
.It Sy metaslab_lba_weighting_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Give more weight to metaslabs with lower LBAs,
assuming they have greater bandwidth,
as is typically the case on a modern constant angular velocity disk drive.
.
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq uint
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
reduce unnecessary reloading.
Note that both this many TXGs and
.Sy metaslab_unload_delay_ms
milliseconds must pass before unloading will occur.
.
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq uint
After a metaslab is used, we keep it loaded for this many milliseconds,
to attempt to reduce unnecessary reloading.
Note that both this many milliseconds and
.Sy metaslab_unload_delay
TXGs must pass before unloading will occur.
.
.It Sy raidz_expand_max_copy_bytes Ns = Ns Sy 160MB Pq ulong
Max amount of memory to use for RAID-Z expansion I/O.
This limits how much I/O can be outstanding at once.
.
.It Sy raidz_expand_max_reflow_bytes Ns = Ns Sy 0 Pq ulong
For testing, pause RAID-Z expansion when reflow amount reaches this value.
.
.It Sy raidz_io_aggregate_rows Ns = Ns Sy 4 Pq ulong
For expanded RAID-Z, aggregate reads that have more rows than this.
.
.It Sy reference_history Ns = Ns Sy 3 Pq int
Maximum reference holders being tracked when reference_tracking_enable is
active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
.Sy refcount_t
objects (debug builds only).
.
.It Sy send_holes_without_birth_time Ns = Ns Sy 1 Ns | Ns 0 Pq int
When set, the
.Sy hole_birth
optimization will not be used, and all holes will always be sent during a
.Nm zfs Cm send .
This is useful if you suspect your datasets are affected by a bug in
.Sy hole_birth .
.
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
SPA config file.
.
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq uint
Multiplication factor used to estimate actual disk consumption from the
size of data being written.
The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its configuration.
Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor,
particularly if they operate close to quota or capacity limits.
.
.It Sy spa_load_print_vdev_tree Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to print the vdev tree in the debugging message buffer during pool
import.
.
.It Sy spa_load_verify_data Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse data blocks during an "extreme rewind"
.Pq Fl X
import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal skips non-metadata blocks.
It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.
.It Sy spa_load_verify_metadata Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse blocks during an "extreme rewind"
.Pq Fl X
pool import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal is not performed.
It can be toggled once the import has started to stop or start the traversal.
.
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq uint
Sets the maximum number of bytes to consume during pool import to the log2
fraction of the target ARC size.
.
.It Sy spa_slop_shift Ns = Ns Sy 5 Po 1/32nd Pc Pq int
Normally, we don't allow the last
.Sy 3.2% Pq Sy 1/2^spa_slop_shift
of space in the pool to be consumed.
This ensures that we don't run the pool completely out of space,
due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space.
If we have less than this amount of free space,
most ZPL operations (e.g. write, create) will return
.Sy ENOSPC .
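.Pp
As a rough illustration only, with the default shift of
.Sy 5
the reserved slop space works out to:
.Bd -literal -compact
slop = pool size / 2^spa_slop_shift = pool size / 32   (~3.2% of the pool)
.Ed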
.
.It Sy spa_num_allocators Ns = Ns Sy 4 Pq int
Determines the number of block allocators to use per spa instance.
Capped by the number of actual CPUs in the system via
.Sy spa_cpus_per_allocator .
.Pp
Note that setting this value too high could result in performance
degradation and/or excess fragmentation.
The set value only applies to pools imported or created afterwards.
.
.It Sy spa_cpus_per_allocator Ns = Ns Sy 4 Pq int
Determines the minimum number of CPUs in the system per block allocator
in a spa instance.
The set value only applies to pools imported or created afterwards.
.
.It Sy spa_upgrade_errlog_limit Ns = Ns Sy 0 Pq uint
Limits the number of on-disk error log entries that will be converted to the
new format when enabling the
.Sy head_errlog
feature.
The default is to convert all log entries.
.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space, in bytes,
which will be included as "unnecessary" data in a chunk of copied data.
.Pp
The default value here was chosen to align with
.Sy zfs_vdev_read_gap_limit ,
which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.
.It Sy vdev_file_logical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Logical ashift for file-based devices.
.
.It Sy vdev_file_physical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Physical ashift for file-based devices.
.
.It Sy zap_iterate_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, when we start iterating over a ZAP object,
prefetch the entire object (all leaf blocks).
However, this is limited by
.Sy dmu_prefetch_max .
.
.It Sy zap_micro_max_size Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
Maximum micro ZAP size.
A "micro" ZAP is upgraded to a "fat" ZAP once it grows beyond the specified
size.
Sizes higher than 128 KiB will be clamped to 128 KiB unless the
.Sy large_microzap
feature is enabled.
.
.It Sy zap_shrink_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, adjacent empty ZAP blocks will be collapsed, reducing disk space.
.
.It Sy zfetch_min_distance Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Min bytes to prefetch per stream.
Prefetch distance starts from the demand access size and quickly grows to
this value, doubling on each hit.
After that it may grow further by 1/8 per hit, but only if some prefetches
issued since the last time have not completed in time to satisfy the demand
request, i.e. the prefetch depth didn't cover the read latency or the pool
got saturated.
.
.It Sy zfetch_max_distance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch per stream.
.
.It Sy zfetch_max_idistance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch indirects for per stream.
.
.It Sy zfetch_max_reorder Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
Requests within this byte distance from the current prefetch stream position
are considered parts of the stream, reordered due to parallel processing.
Such requests do not advance the stream position immediately unless
.Sy zfetch_hole_shift
fill threshold is reached, but are saved to fill holes in the stream later.
.
.It Sy zfetch_max_streams Ns = Ns Sy 8 Pq uint
Max number of streams per zfetch (prefetch streams per file).
.
.It Sy zfetch_min_sec_reap Ns = Ns Sy 1 Pq uint
Min time before an inactive prefetch stream can be reclaimed.
.
.It Sy zfetch_max_sec_reap Ns = Ns Sy 2 Pq uint
Max time before an inactive prefetch stream can be deleted.
.
.It Sy zfs_abd_scatter_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Controls whether the ARC may use scatter/gather lists; when unset, all
allocations are forced to be linear in kernel memory.
Disabling can improve performance in some code paths
at the expense of fragmented kernel memory.
.
.It Sy zfs_abd_scatter_max_order Ns = Ns Sy MAX_ORDER\-1 Pq uint
Maximum number of consecutive memory pages allocated in a single block for
scatter/gather lists.
.Pp
The value of
.Sy MAX_ORDER
depends on kernel configuration.
.
.It Sy zfs_abd_scatter_min_size Ns = Ns Sy 1536 Ns B Po 1.5 KiB Pc Pq uint
This is the minimum allocation size that will use scatter (page-based) ABDs.
Smaller allocations will use linear ABDs.
.
.It Sy zfs_arc_dnode_limit Ns = Ns Sy 0 Ns B Pq u64
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata.
This value acts as a ceiling to the amount of dnode metadata, and defaults to
.Sy 0 ,
which indicates that a percentage of the ARC meta buffers, based on
.Sy zfs_arc_dnode_limit_percent ,
may be used for dnodes.
.
.It Sy zfs_arc_dnode_limit_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage that can be consumed by dnodes of ARC meta buffers.
.Pp
See also
.Sy zfs_arc_dnode_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_dnode_reduce_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds
.Sy zfs_arc_dnode_limit .
.
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq uint
The ARC's buffer hash table is sized based on the assumption of an average
block size of this value.
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
with 8-byte pointers.
For configurations with a known larger average block size,
this value can be increased to reduce the memory footprint.
.
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq uint
When
.Fn arc_is_overflowing ,
.Fn arc_get_data_impl
waits for this percent of the requested amount of data to be evicted.
For example, by default, for every
.Em 2 KiB
that's evicted,
.Em 1 KiB
of it may be "reused" by a new allocation.
Since this is above
.Sy 100 Ns % ,
it ensures that progress is made towards getting
.Sy arc_size No under Sy arc_c .
Since this is finite, it ensures that allocations can still happen,
even during the potentially long time that
.Sy arc_size No is more than Sy arc_c .
.
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq uint
Number of ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq uint
If set to a non-zero value, it will replace the
.Sy arc_grow_retry
value with this value.
The
.Sy arc_grow_retry
.No value Pq default Sy 5 Ns s
is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.
.It Sy zfs_arc_lotsfree_percent Ns = Ns Sy 10 Ns % Pq int
Throttle I/O when free system memory drops below this percentage of total
system memory.
Setting this value to
.Sy 0
will disable the throttle.
.
.It Sy zfs_arc_max Ns = Ns Sy 0 Ns B Pq u64
Max size of ARC in bytes.
If
.Sy 0 ,
then the max size of ARC is determined by the amount of system memory installed.
The larger of
.Sy all_system_memory No \- Sy 1 GiB
and
.Sy 5/8 No \(mu Sy all_system_memory
will be used as the limit.
This value must be at least
.Sy 67108864 Ns B Pq 64 MiB .
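.Pp
As a rough illustration of the sizing rule above (not an exact quote of the
implementation), a machine with 16 GiB of memory would get a default limit of
.Bd -literal -compact
max(16 GiB - 1 GiB, 5/8 * 16 GiB) = max(15 GiB, 10 GiB) = 15 GiB
.Ed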
.Pp
This value can be changed dynamically, with some caveats.
It cannot be set back to
.Sy 0
while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.
.It Sy zfs_arc_meta_balance Ns = Ns Sy 500 Pq uint
Balance between metadata and data on ghost hits.
Values above 100 increase metadata caching by proportionally reducing the
effect of ghost data hits on the target data/metadata rate.
.
.It Sy zfs_arc_min Ns = Ns Sy 0 Ns B Pq u64
Min size of ARC in bytes.
.No If set to Sy 0 , arc_c_min
will default to consuming the larger of
.Sy 32 MiB
and
.Sy all_system_memory No / Sy 32 .
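.Pp
As a rough illustration of the default above, a machine with 16 GiB of memory
would get
.Bd -literal -compact
max(32 MiB, 16 GiB / 32) = max(32 MiB, 512 MiB) = 512 MiB
.Ed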
.
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq uint
Minimum time prefetched blocks are locked in the ARC.
.
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq uint
Minimum time "prescient prefetched" blocks are locked in the ARC.
These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them.
.
.It Sy zfs_arc_prune_task_threads Ns = Ns Sy 1 Pq int
Number of arc_prune threads.
.Fx
does not need more than one.
Linux may theoretically use one per mount point up to the number of CPUs,
but that has not been proven to be useful.
.
.It Sy zfs_max_missing_tvds Ns = Ns Sy 0 Pq int
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.
.It Sy zfs_max_nvlist_src_size Ns = Ns Sy 0 Pq u64
Maximum size in bytes allowed to be passed as
.Sy zc_nvlist_src_size
for ioctls on
.Pa /dev/zfs .
This prevents a user from causing the kernel to allocate
an excessive amount of memory.
When the limit is exceeded, the ioctl fails with
.Sy EINVAL
and a description of the error is sent to the
.Pa zfs-dbgmsg
log.
This parameter should not need to be touched under normal circumstances.
If
.Sy 0 ,
equivalent to a quarter of the user-wired memory limit under
.Fx
and to
.Sy 134217728 Ns B Pq 128 MiB
under Linux.
.
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq uint
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and metadata objects.
Locking is performed at the level of these "sub-lists".
This parameter controls the number of sub-lists per ARC state,
and also applies to other uses of the multilist data structure.
.Pp
If
.Sy 0 ,
equivalent to the greater of the number of online CPUs and
.Sy 4 .
.
.It Sy zfs_arc_overflow_shift Ns = Ns Sy 8 Pq int
The ARC size is considered to be overflowing if it exceeds the current
ARC target size
.Pq Sy arc_c
by thresholds determined by this parameter.
Exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No / Sy 2
starts ARC reclamation process.
If that appears insufficient, exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No \(mu Sy 1.5
blocks new buffer allocation until the reclaim thread catches up.
A started reclamation process continues until the ARC size returns below the
target size.
.Pp
The default value of
.Sy 8
causes the ARC to start reclamation if it exceeds the target size by
.Em 0.2%
of the target size, and block allocations by
.Em 0.6% .
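.Pp
As a rough illustration only, the default shift of
.Sy 8
corresponds to:
.Bd -literal -compact
start reclamation: (arc_c >> 8) / 2     = arc_c / 512    (~0.2% of arc_c)
block allocations: (arc_c >> 8) * 3 / 2 = arc_c * 3/512  (~0.6% of arc_c)
.Ed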
.
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq uint
If nonzero, this will update
.Sy arc_shrink_shift Pq default Sy 7
with the new value.
.
.It Sy zfs_arc_pc_percent Ns = Ns Sy 0 Ns % Po off Pc Pq uint
Percent of pagecache to reclaim ARC to.
.Pp
This tunable allows the ZFS ARC to play more nicely
with the kernel's LRU pagecache.
It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
.Sy zfs_arc_min
if necessary.
This value is specified as percent of pagecache size (as measured by
.Sy NR_FILE_PAGES ) ,
where that percent may exceed
.Sy 100 .
This
only operates during memory pressure/reclaim.
.
.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 0 Pq int
This is a limit on how many pages the ARC shrinker makes available for
eviction in response to one page allocation attempt.
Note that in practice, the kernel's shrinker can ask us to evict
up to about four times this for one allocation attempt.
To reduce OOM risk, this limit is applied for kswapd reclaims only.
.Pp
For example a value of
.Sy 10000 Pq in practice, Em 160 MiB No per allocation attempt with 4 KiB pages
limits the amount of time spent attempting to reclaim ARC memory to
less than 100 ms per allocation attempt,
even with a small average compressed block size of ~8 KiB.
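.Pp
As a rough illustration, the figure above follows from:
.Bd -literal -compact
10000 pages * 4 KiB/page * ~4 (shrinker over-ask) = ~160 MiB per attempt
.Ed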
.Pp
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_shrinker_seeks Ns = Ns Sy 2 Pq int
Relative cost of ARC eviction on Linux, i.e. the number of seeks needed to
restore an evicted page.
Bigger values make the ARC more precious and evictions smaller, compared to
other kernel subsystems.
A value of 4 means parity with the page cache.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq u64
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable pool import at module load by ignoring the cache file
.Pq Sy spa_config_path .
.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
(currently 10 checksums over 10 seconds)
or else the daemon may not trigger any action.
.
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 10 Ns % Pq uint
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage.
The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.
.It Sy zfs_condense_indirect_commit_entry_delay_ms Ns = Ns Sy 0 Ns ms Pq int
Vdev indirection layer (used for device removal) sleeps for this many
milliseconds during mapping generation.
Intended for use with the test suite to throttle vdev removal speed.
.
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq uint
Minimum percent of obsolete bytes in vdev mapping required to attempt to
condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
Intended for use with the test suite
to facilitate triggering condensing as needed.
.
.It Sy zfs_condense_indirect_vdevs_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable condensing indirect vdev mappings.
When set, attempt to condense indirect vdev mappings
if the mapping uses more than
.Sy zfs_condense_min_mapping_bytes
bytes of memory and if the obsolete space map object uses more than
.Sy zfs_condense_max_obsolete_bytes
bytes on-disk.
The condensing process is an attempt to save memory by removing obsolete
mappings.
.
.It Sy zfs_condense_max_obsolete_bytes Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_condense_min_mapping_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq u64
Minimum size vdev mapping to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_dbgmsg_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Internally ZFS keeps a small log to facilitate debugging.
The log is enabled by default, and can be disabled by unsetting this option.
The contents of the log can be accessed by reading
.Pa /proc/spl/kstat/zfs/dbgmsg .
Writing
.Sy 0
to the file clears the log.
.Pp
This setting does not influence debug prints due to
.Sy zfs_flags .
.
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Maximum size of the internal ZFS debug log.
.
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
Historically used for controlling what reporting was available under
.Pa /proc/spl/kstat/zfs .
No effect.
.
.It Sy zfs_deadman_checktime_ms Ns = Ns Sy 60000 Ns ms Po 1 min Pc Pq u64
Check time in milliseconds.
This defines the frequency at which we check for hung I/O requests
and potentially invoke the
.Sy zfs_deadman_failmode
behavior.
.
.It Sy zfs_deadman_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
When a pool sync operation takes longer than
.Sy zfs_deadman_synctime_ms ,
or when an individual I/O operation takes longer than
.Sy zfs_deadman_ziotime_ms ,
then the operation is considered to be "hung".
If
.Sy zfs_deadman_enabled
is set, then the deadman behavior is invoked as described by
.Sy zfs_deadman_failmode .
By default, the deadman is enabled and set to
.Sy wait
which results in "hung" I/O operations only being logged.
The deadman is automatically disabled when a pool gets suspended.
.
.It Sy zfs_deadman_events_per_second Ns = Ns Sy 1 Ns /s Pq int
Rate limit deadman zevents (which report hung I/O operations) to this many per
second.
.
.It Sy zfs_deadman_failmode Ns = Ns Sy wait Pq charp
Controls the failure behavior when the deadman detects a "hung" I/O operation.
Valid values are:
.Bl -tag -compact -offset 4n -width "continue"
.It Sy wait
Wait for a "hung" operation to complete.
For each "hung" operation a "deadman" event will be posted
describing that operation.
.It Sy continue
Attempt to recover from a "hung" operation by re-dispatching it
to the I/O pipeline if possible.
.It Sy panic
Panic the system.
This can be used to facilitate automatic fail-over
to a properly configured fail-over partner.
.El
.
.It Sy zfs_deadman_synctime_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the pool sync completes.
.
.It Sy zfs_deadman_ziotime_ms Ns = Ns Sy 300000 Ns ms Po 5 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung".
As long as the operation remains "hung",
the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the operation completes.
.
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable prefetching dedup-ed blocks which are going to be freed.
.
.It Sy zfs_dedup_log_flush_passes_max Ns = Ns Sy 8 Ns Pq uint
Maximum number of dedup log flush passes (iterations) per transaction.
.Pp
At the start of each transaction, OpenZFS will estimate how many entries it
needs to flush out to keep up with the change rate, taking the amount and time
taken to flush on previous txgs into account (see
.Sy zfs_dedup_log_flush_flow_rate_txgs ) .
It will spread this amount into a number of passes.
At each pass, it will use the amount already flushed and the total time taken
by flushing and by other IO to recompute how much it should do for the remainder
of the txg.
.Pp
Reducing the max number of passes will make flushing more aggressive, flushing
out more entries on each pass.
This can be faster, but also more likely to compete with other IO.
Increasing the max number of passes will put fewer entries onto each pass,
keeping the overhead of dedup changes to a minimum but possibly causing a large
number of changes to be dumped on the last pass, which can blow out the txg
sync time beyond
.Sy zfs_txg_timeout .
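.Pp
A simplified sketch of the pass-splitting idea, spreading a flush target
evenly over the remaining passes and ignoring the time feedback described
above (illustrative C, not the OpenZFS implementation):
.Bd -literal -offset indent
#include <assert.h>

int
main(void)
{
        int target = 10000;     /* entries estimated for this txg */
        int passes_max = 8;     /* zfs_dedup_log_flush_passes_max */
        int flushed = 0;

        for (int pass = 1; pass <= passes_max && flushed < target; pass++) {
                int remaining = target - flushed;
                /* spread the remainder over the passes that are left */
                int this_pass = remaining / (passes_max - pass + 1);

                if (this_pass == 0)
                        this_pass = remaining;
                flushed += this_pass;
        }
        assert(flushed == target);
        return (0);
}
.Ed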
.
.It Sy zfs_dedup_log_flush_min_time_ms Ns = Ns Sy 1000 Ns Pq uint
Minimum time to spend on dedup log flush each transaction.
.Pp
At least this long will be spent flushing dedup log entries each transaction,
up to
.Sy zfs_txg_timeout .
This minimum is enforced even if doing so would delay the transaction, that is,
even when other I/O completes in less time.
.
.It Sy zfs_dedup_log_flush_entries_min Ns = Ns Sy 1000 Ns Pq uint
Flush at least this many entries each transaction.
.Pp
OpenZFS will estimate how many entries it needs to flush each transaction to
keep up with the ingest rate (see
.Sy zfs_dedup_log_flush_flow_rate_txgs ) .
This sets the minimum for that estimate.
Raising it can force OpenZFS to flush more aggressively, keeping the log small
and so reducing pool import times, but can make it less able to back off if
log flushing would compete with other IO too much.
.
.It Sy zfs_dedup_log_flush_flow_rate_txgs Ns = Ns Sy 10 Ns Pq uint
Number of transactions to use to compute the flow rate.
.Pp
OpenZFS will estimate how many entries it needs to flush each transaction by
monitoring the number of entries changed (ingest rate), number of entries
flushed (flush rate) and time spent flushing (flush time rate) and combining
these into an overall "flow rate".
It will use an exponential weighted moving average over some number of recent
transactions to compute these rates.
This sets the number of transactions to compute these averages over.
Setting it higher can help to smooth out the flow rate in the face of spiky
workloads, but it will take longer for the flow rate to adjust to a sustained
change in the ingest rate.
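.Pp
A minimal sketch of the kind of exponential weighted moving average described
above, using an assumed 2/(N+1) weighting (illustrative only, not the OpenZFS
code):
.Bd -literal -offset indent
#include <assert.h>

/* one EWMA step with the common 2/(n+1) weighting */
static double
ewma_update(double avg, double sample, unsigned n_txgs)
{
        double weight = 2.0 / (n_txgs + 1);

        return (avg + weight * (sample - avg));
}

int
main(void)
{
        double rate = 1000.0;   /* entries per txg, smoothed so far */

        /* a single spiky txg moves the average only part of the way */
        rate = ewma_update(rate, 5000.0, 10);
        assert(rate > 1000.0 && rate < 5000.0);
        return (0);
}
.Ed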
.
.It Sy zfs_dedup_log_txg_max Ns = Ns Sy 8 Ns Pq uint
Max transactions before starting to flush dedup logs.
.Pp
OpenZFS maintains two dedup logs, one receiving new changes, one flushing.
If there is nothing to flush, it will accumulate changes for no more than this
many transactions before switching the logs and starting to flush entries out.
.
.It Sy zfs_dedup_log_mem_max Ns = Ns Sy 0 Ns Pq u64
Max memory to use for dedup logs.
.Pp
OpenZFS will spend no more than this much memory on maintaining the in-memory
dedup log.
Flushing will begin when around half this amount is being spent on logs.
The default value of
.Sy 0
will cause it to be set by
.Sy zfs_dedup_log_mem_max_percent
instead.
.
.It Sy zfs_dedup_log_mem_max_percent Ns = Ns Sy 1 Ns % Pq uint
Max memory to use for dedup logs, as a percentage of total memory.
.Pp
If
.Sy zfs_dedup_log_mem_max
is not set, it will be initialised as a percentage of the total memory in the
system.
.
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of
.Sy zfs_dirty_data_max .
This value should be at least
.Sy zfs_vdev_async_write_active_max_dirty_percent .
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_delay_scale Ns = Ns Sy 500000 Pq int
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.Pp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second.
This will smoothly handle between ten times and a tenth of this number.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
.Sy zfs_delay_scale No \(mu Sy zfs_dirty_data_max Em must No be smaller than Sy 2^64 .
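.Pp
A worked example of the rule of thumb and the overflow constraint above, using
assumed workload numbers (illustrative C, not the OpenZFS code):
.Bd -literal -offset indent
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint64_t max_ops_per_sec = 2000;        /* assumed workload peak */
        uint64_t delay_scale = 1000000000ULL / max_ops_per_sec;
        uint64_t dirty_data_max = 4ULL << 30;   /* 4 GiB, for example */

        assert(delay_scale == 500000);          /* the default value */
        /* the product above must stay below 2^64 */
        assert(delay_scale <= UINT64_MAX / dirty_data_max);
        return (0);
}
.Ed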
.
.It Sy zfs_dio_write_verify_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit Direct I/O write verify events to this many per second.
.
.It Sy zfs_disable_ivset_guid_check Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disables requirement for IVset GUIDs to be present and match when doing a raw
receive of encrypted datasets.
Intended for users whose pools were created with
OpenZFS pre-release versions and now have compatibility issues.
.
.It Sy zfs_key_max_salt_uses Ns = Ns Sy 400000000 Po 4*10^8 Pc Pq ulong
Maximum number of uses of a single salt value before generating a new one for
encrypted datasets.
The default value is also the maximum.
.
.It Sy zfs_object_mutex_size Ns = Ns Sy 64 Pq uint
Size of the znode hashtable used for holds.
.Pp
Due to the need to hold locks on objects that may not exist yet, kernel mutexes
are not created per-object and instead a hashtable is used where collisions
will result in objects waiting when there is not actually contention on the
same object.
.
.It Sy zfs_slow_io_events_per_second Ns = Ns Sy 20 Ns /s Pq int
Rate limit delay zevents (which report slow I/O operations) to this many per
second.
.
.It Sy zfs_unflushed_max_mem_amt Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Upper-bound limit for unflushed metadata changes to be held by the
log spacemap in memory, in bytes.
.
.It Sy zfs_unflushed_max_mem_ppm Ns = Ns Sy 1000 Ns ppm Po 0.1% Pc Pq u64
Part of overall system memory that ZFS allows to be used
for unflushed metadata changes by the log spacemap, in millionths.
.
.It Sy zfs_unflushed_log_block_max Ns = Ns Sy 131072 Po 128k Pc Pq u64
Describes the maximum number of log spacemap blocks allowed for each pool.
The default value means that the space in all the log spacemaps
can add up to no more than
.Sy 131072
blocks (which means
.Em 16 GiB
of logical space before compression and ditto blocks,
assuming that blocksize is
.Em 128 KiB ) .
.Pp
This tunable is important because it involves a trade-off between import
time after an unclean export and the frequency of flushing metaslabs.
The higher this number is, the more log blocks we allow when the pool is
active which means that we flush metaslabs less often and thus decrease
the number of I/O operations for spacemap updates per TXG.
At the same time though, that means that in the event of an unclean export,
there will be more log spacemap blocks for us to read, inducing overhead
in the import time of the pool.
The lower the number, the more flushing occurs, destroying log
blocks more quickly as they become obsolete faster, which leaves fewer blocks
to be read during import time after a crash.
.Pp
Each log spacemap block existing during pool import leads to approximately
one extra logical I/O issued.
This is the reason why this tunable is exposed in terms of blocks rather
than space used.
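.Pp
A small worked example of the arithmetic behind the default quoted above:
.Bd -literal -offset indent
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint64_t max_blocks = 131072;           /* the default */
        uint64_t block_size = 128 * 1024;       /* 128 KiB blocksize */

        /* 131072 blocks of 128 KiB add up to 16 GiB of logical space */
        assert(max_blocks * block_size == 16ULL << 30);
        return (0);
}
.Ed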
.
.It Sy zfs_unflushed_log_block_min Ns = Ns Sy 1000 Pq u64
If the number of metaslabs is small and our incoming rate is high,
we could get into a situation that we are flushing all our metaslabs every TXG.
Thus we always allow at least this many log blocks.
.
.It Sy zfs_unflushed_log_block_pct Ns = Ns Sy 400 Ns % Pq u64
Tunable used to determine the number of blocks that can be used for
the spacemap log, expressed as a percentage of the total number of
unflushed metaslabs in the pool.
.
.It Sy zfs_unflushed_log_txg_max Ns = Ns Sy 1000 Pq u64
Tunable limiting maximum time in TXGs any metaslab may remain unflushed.
It effectively limits maximum number of unflushed per-TXG spacemap logs
that need to be read after unclean pool export.
.
.It Sy zfs_unlink_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When enabled, files will not be asynchronously removed from the list of pending
unlinks and the space they consume will be leaked.
Once this option has been disabled and the dataset is remounted,
the pending unlinks will be processed and the freed space returned to the pool.
This option is used by the test suite.
.
.It Sy zfs_delete_blocks Ns = Ns Sy 20480 Pq ulong
This is used to define a large file for the purposes of deletion.
Files containing more than
.Sy zfs_delete_blocks
blocks will be deleted asynchronously, while smaller files are deleted
synchronously.
Decreasing this value will reduce the time spent in an
.Xr unlink 2
system call, at the expense of a longer delay before the freed space is
available.
This only applies on Linux.
.
.It Sy zfs_dirty_data_max Ns = Pq int
Determines the dirty space limit in bytes.
Once this limit is exceeded, new writes are halted until space frees up.
This parameter takes precedence over
.Sy zfs_dirty_data_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/10 ,
capped at
.Sy zfs_dirty_data_max_max .
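.Pp
A hypothetical sketch of how the default is derived, mirroring the description
above rather than the actual OpenZFS code (the helper name is invented):
.Bd -literal -offset indent
#include <assert.h>
#include <stdint.h>

/* hypothetical helper mirroring the description, not the OpenZFS code */
static uint64_t
default_dirty_data_max(uint64_t physical_ram, uint64_t dirty_data_max_max)
{
        uint64_t v = physical_ram / 10;

        return (v < dirty_data_max_max ? v : dirty_data_max_max);
}

int
main(void)
{
        uint64_t ram = 64ULL << 30;     /* a 64 GiB machine */
        uint64_t cap = 4ULL << 30;      /* min(physical_ram/4, 4 GiB) */

        /* 6.4 GiB would be the 10% figure, but the cap wins */
        assert(default_dirty_data_max(ram, cap) == cap);
        return (0);
}
.Ed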
.
.It Sy zfs_dirty_data_max_max Ns = Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
This parameter takes precedence over
.Sy zfs_dirty_data_max_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy min(physical_ram/4, 4GiB) ,
or
.Sy min(physical_ram/4, 1GiB)
for 32-bit systems.
.
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq uint
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed as a percentage of physical RAM.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
The parameter
.Sy zfs_dirty_data_max_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq uint
Determines the dirty space limit, expressed as a percentage of all memory.
Once this limit is exceeded, new writes are halted until space frees up.
The parameter
.Sy zfs_dirty_data_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Subject to
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq uint
Start syncing out a transaction group if there's at least this much dirty data
.Pq as a percentage of Sy zfs_dirty_data_max .
This should be less than
.Sy zfs_vdev_async_write_active_min_dirty_percent .
.
.It Sy zfs_wrlog_data_max Ns = Pq int
The upper limit of write-transaction ZIL log data size in bytes.
Write operations are throttled when approaching the limit until log data is
cleared out after transaction group sync.
Because of some overhead, it should be set to at least twice the size of
.Sy zfs_dirty_data_max
.No to prevent harming normal write throughput .
It also should be smaller than the size of the slog device if slog is present.
.Pp
Defaults to
.Sy zfs_dirty_data_max*2 .
.
.It Sy zfs_fallocate_reserve_percent Ns = Ns Sy 110 Ns % Pq uint
Since ZFS is a copy-on-write filesystem with snapshots, blocks cannot be
preallocated for a file in order to guarantee that later writes will not
run out of space.
Instead,
.Xr fallocate 2
space preallocation only checks that sufficient space is currently available
in the pool or the user's project quota allocation,
and then creates a sparse file of the requested size.
The requested space is multiplied by
.Sy zfs_fallocate_reserve_percent
to allow additional space for indirect blocks and other internal metadata.
Setting this to
.Sy 0
disables support for
.Xr fallocate 2
and causes it to return
.Sy EOPNOTSUPP .
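.Pp
A hypothetical sketch of the reservation check described above
(illustrative C, not the OpenZFS implementation; the helper name is invented):
.Bd -literal -offset indent
#include <assert.h>
#include <stdint.h>

/* hypothetical check mirroring the description above */
static int
fallocate_space_ok(uint64_t requested, uint64_t available, uint64_t pct)
{
        return (requested * pct / 100 <= available);
}

int
main(void)
{
        uint64_t req = 10ULL << 30;     /* a 10 GiB fallocate(2) request */

        /* with the default 110%, 11 GiB must currently be available */
        assert(fallocate_space_ok(req, 12ULL << 30, 110));
        assert(!fallocate_space_ok(req, 10ULL << 30, 110));
        return (0);
}
.Ed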
.
.It Sy zfs_fletcher_4_impl Ns = Ns Sy fastest Pq string
Select a fletcher 4 implementation.
.Pp
Supported selectors are:
.Sy fastest , scalar , sse2 , ssse3 , avx2 , avx512f , avx512bw ,
.No and Sy aarch64_neon .
All except
.Sy fastest No and Sy scalar
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of fletcher 4 are available, the
.Sy fastest
will be chosen using a micro benchmark.
Selecting
.Sy scalar
results in the original CPU-based calculation being used.
Selecting any option other than
.Sy fastest No or Sy scalar
results in vector instructions
from the respective CPU instruction set being used.
.
.It Sy zfs_bclone_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enables access to the block cloning feature.
If this setting is 0, then even if feature@block_cloning is enabled,
using functions and system calls that attempt to clone blocks will act as
though the feature is disabled.
.
.It Sy zfs_bclone_wait_dirty Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set to 1 the FICLONE and FICLONERANGE ioctls wait for dirty data to be
written to disk.
This allows the clone operation to reliably succeed when a file is
modified and then immediately cloned.
For small files this may be slower than making a copy of the file.
Therefore, this setting defaults to 0 which causes a clone operation to
immediately fail when encountering a dirty block.
.
.It Sy zfs_blake3_impl Ns = Ns Sy fastest Pq string
Select a BLAKE3 implementation.
.Pp
Supported selectors are:
.Sy cycle , fastest , generic , sse2 , sse41 , avx2 , avx512 .
All except
.Sy cycle , fastest No and Sy generic
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of BLAKE3 are available, the
.Sy fastest
will be chosen using a micro benchmark.
You can see the benchmark results by reading this kstat file:
.Pa /proc/spl/kstat/zfs/chksum_bench .
.
.It Sy zfs_free_bpobj_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable the processing of the free_bpobj object.
.
.It Sy zfs_async_block_max_blocks Ns = Ns Sy UINT64_MAX Po unlimited Pc Pq u64
Maximum number of blocks freed in a single TXG.
.
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq u64
Maximum number of dedup blocks freed in a single TXG.
.
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq uint
Maximum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq uint
Minimum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
When the pool has more than this much dirty data, use
.Sy zfs_vdev_async_write_max_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq uint
When the pool has less than this much dirty data, use
.Sy zfs_vdev_async_write_min_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly
interpolated.
.No See Sx ZFS I/O SCHEDULER .
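.Pp
A minimal sketch of the linear interpolation between this threshold and
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
using the default values (illustrative C, not the OpenZFS code):
.Bd -literal -offset indent
#include <assert.h>

static unsigned
async_write_active(unsigned dirty_pct, unsigned lo_pct, unsigned hi_pct,
    unsigned min_active, unsigned max_active)
{
        if (dirty_pct <= lo_pct)
                return (min_active);
        if (dirty_pct >= hi_pct)
                return (max_active);
        return (min_active + (max_active - min_active) *
            (dirty_pct - lo_pct) / (hi_pct - lo_pct));
}

int
main(void)
{
        /* defaults: 30%..60% dirty maps onto 2..10 active writes */
        assert(async_write_active(20, 30, 60, 2, 10) == 2);
        assert(async_write_active(45, 30, 60, 2, 10) == 6);
        assert(async_write_active(70, 30, 60, 2, 10) == 10);
        return (0);
}
.Ed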
.
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 10 Pq uint
Maximum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq uint
Minimum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.Pp
Lower values are associated with better latency on rotational media but poorer
resilver performance.
The default value of
.Sy 2
was chosen as a compromise.
A value of
.Sy 3
has been shown to improve resilver performance further at a cost of
further increasing latency.
.
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq uint
Maximum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq uint
Minimum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq uint
The maximum number of I/O operations active to each device.
Ideally, this will be at least the sum of each queue's
.Sy max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_open_timeout_ms Ns = Ns Sy 1000 Pq uint
Timeout value to wait before determining a device is missing
during import.
This is helpful for transient missing paths due
to links being briefly removed and recreated in response to
udev events.
.
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq uint
Maximum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq uint
Minimum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq uint
Maximum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq uint
Minimum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq uint
Maximum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq uint
Minimum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq uint
Maximum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq uint
Minimum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq uint
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
the number of concurrently-active I/O operations is limited to
.Sy zfs_*_min_active ,
unless the vdev is "idle".
When there are no interactive I/O operations active (synchronous or otherwise),
and
.Sy zfs_vdev_nia_delay
operations have completed since the last interactive operation,
then the vdev is considered to be "idle",
and the number of concurrently-active non-interactive operations is increased to
.Sy zfs_*_max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq uint
Some HDDs tend to prioritize sequential I/O so strongly that concurrent
random I/O latency reaches several seconds.
On some HDDs this happens even if sequential I/O operations
are submitted one at a time, and so setting
.Sy zfs_*_max_active Ns = Sy 1
does not help.
To prevent non-interactive I/O, like scrub,
from monopolizing the device, no more than
.Sy zfs_vdev_nia_credit
operations can be sent
while there are outstanding incomplete interactive operations.
This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq uint
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
.Pp
Also see
.Sy zio_dva_throttle_enabled .
.
.It Sy zfs_vdev_def_queue_depth Ns = Ns Sy 32 Pq uint
Default queue depth for each vdev IO allocator.
Higher values allow for better coalescing of sequential writes before sending
them to the disk, but can increase transaction commit times.
.
.It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint
Defines if the driver should retire on a given error type.
The following options may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 Device No driver retries on device errors
2 Transport No driver retries on transport errors.
4 Driver No driver retries on driver errors.
.TE
.
.It Sy zfs_vdev_disk_max_segs Ns = Ns Sy 0 Pq uint
Maximum number of segments to add to a BIO (min 4).
If this is higher than the maximum allowed by the device queue or the kernel
itself, it will be clamped.
Setting it to zero will cause the kernel's ideal size to be used.
This parameter only applies on Linux.
This parameter is ignored if
.Sy zfs_vdev_disk_classic Ns = Ns Sy 1 .
.
.It Sy zfs_vdev_disk_classic Ns = Ns Sy 0 Ns | Ns 1 Pq uint
If set to 1, OpenZFS will submit IO to Linux using the method it used in 2.2
and earlier.
This "classic" method has known issues with highly fragmented IO requests and
is slower on many workloads, but it has been in use for many years and is known
to be very stable.
If you set this parameter, please also open a bug report describing why you did
so,
including the workload involved and any error messages.
.Pp
This parameter and the classic submission method will be removed once we have
total confidence in the new method.
.Pp
This parameter only applies on Linux, and can only be set at module load time.
.
.It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int
Time before expiring
.Pa .zfs/snapshot .
.
.It Sy zfs_admin_snapshot Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow the creation, removal, or renaming of entries in the
.Sy .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled, this functionality works both locally and over NFS exports
which have the
.Em no_root_squash
option set.
.
.It Sy zfs_snapshot_no_setuid Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to disable
.Em setuid/setgid
support for snapshot mounts triggered by access to the
.Sy .zfs/snapshot
directory by setting the
.Em nosuid
mount option.
.
.It Sy zfs_flags Ns = Ns Sy 0 Pq int
Set additional debugging flags.
The following flags may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 ZFS_DEBUG_DPRINTF Enable dprintf entries in the debug log.
* 2 ZFS_DEBUG_DBUF_VERIFY Enable extra dbuf verifications.
* 4 ZFS_DEBUG_DNODE_VERIFY Enable extra dnode verifications.
8 ZFS_DEBUG_SNAPNAMES Enable snapshot name verification.
* 16 ZFS_DEBUG_MODIFY Check for illegally modified ARC buffers.
64 ZFS_DEBUG_ZIO_FREE Enable verification of block frees.
128 ZFS_DEBUG_HISTOGRAM_VERIFY Enable extra spacemap histogram verifications.
256 ZFS_DEBUG_METASLAB_VERIFY Verify space accounting on disk matches in-memory \fBrange_trees\fP.
512 ZFS_DEBUG_SET_ERROR Enable \fBSET_ERROR\fP and dprintf entries in the debug log.
1024 ZFS_DEBUG_INDIRECT_REMAP Verify split blocks created by device removal.
2048 ZFS_DEBUG_TRIM Verify TRIM ranges are always within the allocatable range tree.
4096 ZFS_DEBUG_LOG_SPACEMAP Verify that the log summary is consistent with the spacemap log
and enable \fBzfs_dbgmsgs\fP for metaslab loading and flushing.
.TE
.Sy \& * No Requires debug build .
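.Pp
A small example of combining values from the table above into a single
.Sy zfs_flags
setting (illustrative C; the macro names mirror the table entries):
.Bd -literal -offset indent
#include <assert.h>

/* values copied from the table above */
#define ZFS_DEBUG_DPRINTF       1
#define ZFS_DEBUG_SNAPNAMES     8
#define ZFS_DEBUG_ZIO_FREE      64

int
main(void)
{
        int zfs_flags = ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SNAPNAMES |
            ZFS_DEBUG_ZIO_FREE;

        assert(zfs_flags == 73);        /* the value to set the tunable to */
        return (0);
}
.Ed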
.
.It Sy zfs_btree_verify_intensity Ns = Ns Sy 0 Pq uint
Enables btree verification.
The following settings are cumulative:
.TS
box;
lbz r l l .
Value Description
1 Verify height.
2 Verify pointers from children to parent.
3 Verify element counts.
4 Verify element order. (expensive)
* 5 Verify unused memory is poisoned. (expensive)
.TE
.Sy \& * No Requires debug build .
.
.It Sy zfs_free_leak_on_eio Ns = Ns Sy 0 Ns | Ns 1 Pq int
If destroy encounters an
.Sy EIO
while reading metadata (e.g. indirect blocks),
space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled",
as it is unable to make forward progress.
While in this stalled state, all remaining space to free
from the error-encountering filesystem is "temporarily leaked".
Set this flag to cause it to ignore the
.Sy EIO ,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.
.Pp
The default "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/O operations fail), and then later recovers.
In this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks.
Note, however, that this case is actually fairly rare.
.Pp
Typically pools either
.Bl -enum -compact -offset 4n -width "1."
.It
fail completely (but perhaps temporarily,
e.g. due to a top-level vdev going offline), or
.It
have localized, permanent errors (e.g. disk returns the wrong data
due to bit flip or firmware bug).
.El
In the former case, this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless.
In the latter, because the error is permanent, the best we can do
is leak the minimum amount of space,
which is what setting this flag will do.
It is therefore reasonable for this flag to normally be set,
but we chose the more conservative approach of not setting it,
so that there is no possibility of
leaking space in the "partial temporary" failure case.
.
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq uint
During a
.Nm zfs Cm destroy
operation using the
.Sy async_destroy
feature,
a minimum of this much time will be spent working on freeing blocks per TXG.
.
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq uint
Similar to
.Sy zfs_free_min_time_ms ,
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq s64
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq u64
Pattern written to vdev free space by
.Xr zpool-initialize 8 .
.
.It Sy zfs_initialize_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Size of writes used by
.Xr zpool-initialize 8 .
This option is used by the test suite.
.
.It Sy zfs_livelist_max_entries Ns = Ns Sy 500000 Po 5*10^5 Pc Pq u64
The threshold size (in block pointers) at which we create a new sub-livelist.
Larger sublists are more costly from a memory perspective but the fewer
sublists there are, the lower the cost of insertion.
.
.It Sy zfs_livelist_min_percent_shared Ns = Ns Sy 75 Ns % Pq int
If the amount of shared space between a snapshot and its clone drops below
this threshold, the clone turns off the livelist and reverts to the old
deletion method.
This is in place because livelists no longer give us a benefit
once a clone has been overwritten enough.
.
.It Sy zfs_livelist_condense_new_alloc Ns = Ns Sy 0 Pq int
Incremented each time an extra ALLOC blkptr is added to a livelist entry while
it is being condensed.
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_sync .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the synctask \(em
.Fn spa_livelist_condense_sync .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_livelist_condense_zthr_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_zthr_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the open context condensing work in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_lua_max_instrlimit Ns = Ns Sy 100000000 Po 10^8 Pc Pq u64
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.
.It Sy zfs_lua_max_memlimit Ns = Ns Sy 104857600 Po 100 MiB Pc Pq u64
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.
.It Sy zfs_max_dataset_nesting Ns = Ns Sy 50 Pq int
The maximum depth of nested datasets.
This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.
.It Sy zfs_max_log_walking Ns = Ns Sy 5 Pq u64
The number of past TXGs that the flushing algorithm of the log spacemap
feature uses to estimate incoming log blocks.
.
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq u64
Maximum number of rows allowed in the summary of the spacemap log.
.
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq uint
We currently support block sizes from
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
The benefits of larger blocks, and thus larger I/O,
need to be weighed against the cost of COWing a giant block to modify one byte.
Additionally, very large blocks can have an impact on I/O latency,
and also potentially on the memory allocator.
Therefore, we formerly forbade creating blocks larger than 1M.
Larger blocks could be created by changing this tunable,
and pools with larger blocks can always be imported and used,
regardless of this setting.
.Pp
Note that it is still limited by default to
.Ar 1 MiB
on x86_32, because Linux's
3/1 memory split doesn't leave much room for 16M chunks.
.
.It Sy zfs_allow_redacted_dataset_mount Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow datasets received with redacted send/receive to be mounted.
Normally disabled because these datasets may be missing key data.
.
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq u64
Minimum number of metaslabs to flush per dirty TXG.
.
-.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq uint
+.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 77 Ns % Pq uint
Allow metaslabs to keep their active state as long as their fragmentation
percentage is no more than this value.
An active metaslab that exceeds this threshold
will no longer keep its active status allowing better metaslabs to be selected.
.
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq uint
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value.
If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq uint
Defines a threshold at which metaslab groups should be eligible for allocations.
The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold.
Once all groups have reached the threshold, all groups are allowed to accept
allocations.
The default value of
.Sy 0
disables the feature and causes all metaslab groups to be eligible for
allocations.
.Pp
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old
.Sy zfs_mg_alloc_failures
facility.
.
.It Sy zfs_ddt_data_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place DDT data into the special allocation class.
.
.It Sy zfs_user_indirect_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place user data indirect blocks
into the special allocation class.
.
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest multihost updates will be available
in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
.
.It Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq u64
Used to control the frequency of multihost writes which are performed when the
.Sy multihost
pool property is on.
This is one of the factors used to determine the
length of the activity check during import.
.Pp
The multihost write period is
.Sy zfs_multihost_interval No / Sy leaf-vdevs .
On average a multihost write will be issued for each leaf vdev
every
.Sy zfs_multihost_interval
milliseconds.
In practice, the observed period can vary with the I/O load
and this observed value is the delay which is stored in the uberblock.
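.Pp
A worked example of the write period for an assumed pool layout:
.Bd -literal -offset indent
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint64_t interval_ms = 1000;    /* default zfs_multihost_interval */
        uint64_t leaf_vdevs = 8;        /* example pool */

        /* some leaf vdev receives an MMP write about every 125 ms */
        assert(interval_ms / leaf_vdevs == 125);
        return (0);
}
.Ed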
.
.It Sy zfs_multihost_import_intervals Ns = Ns Sy 20 Pq uint
Used to control the duration of the activity test on import.
Smaller values of
.Sy zfs_multihost_import_intervals
will reduce the import time but increase
the risk of failing to detect an active pool.
The total activity check time is never allowed to drop below one second.
.Pp
On import the activity check waits a minimum amount of time determined by
.Sy zfs_multihost_interval No \(mu Sy zfs_multihost_import_intervals ,
or the same product computed on the host which last had the pool imported,
whichever is greater.
The activity check time may be further extended if the value of MMP
delay found in the best uberblock indicates actual multihost updates happened
at longer intervals than
.Sy zfs_multihost_interval .
A minimum of
.Em 100 ms
is enforced.
.Pp
.Sy 0 No is equivalent to Sy 1 .
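.Pp
A worked example of the minimum activity-check duration, using assumed values
for the product stored by the last importing host (illustrative C, not the
OpenZFS code):
.Bd -literal -offset indent
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint64_t interval_ms = 1000;            /* zfs_multihost_interval */
        uint64_t import_intervals = 20;         /* the default */
        uint64_t remote_product_ms = 15000;     /* product from the last host */

        uint64_t local = interval_ms * import_intervals;
        uint64_t wait_ms = local > remote_product_ms ?
            local : remote_product_ms;

        /* the activity check waits at least 20 seconds here */
        assert(wait_ms == 20000);
        return (0);
}
.Ed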
.
.It Sy zfs_multihost_fail_intervals Ns = Ns Sy 10 Pq uint
Controls the behavior of the pool when multihost write failures or delays are
detected.
.Pp
When
.Sy 0 ,
multihost write failures or delays are ignored.
The failures will still be reported to the ZED which depending on
its configuration may take action such as suspending the pool or offlining a
device.
.Pp
Otherwise, the pool will be suspended if
.Sy zfs_multihost_fail_intervals No \(mu Sy zfs_multihost_interval
milliseconds pass without a successful MMP write.
This guarantees the activity test will see MMP writes if the pool is imported.
.Sy 1 No is equivalent to Sy 2 ;
this is necessary to prevent the pool from being suspended
due to normal, small I/O latency variations.
.
.It Sy zfs_no_scrub_io Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable scrub I/O.
This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.
.It Sy zfs_no_scrub_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable block prefetching for scrubs.
.
.It Sy zfs_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable cache flush operations on disks when writing.
Setting this will cause pool corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zfs_nopwrite_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Allow no-operation writes.
The occurrence of nopwrites will further depend on other pool properties
.Pq i.a. the checksumming and compression algorithms .
.
.It Sy zfs_dmu_offset_next_sync Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable forcing TXG sync to find holes.
When enabled forces ZFS to sync data when
.Sy SEEK_HOLE No or Sy SEEK_DATA
flags are used allowing holes in a file to be accurately reported.
When disabled holes will not be reported in recently dirtied files.
.
.It Sy zfs_pd_bytes_max Ns = Ns Sy 52428800 Ns B Po 50 MiB Pc Pq int
The number of bytes which should be prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq uint
The number of blocks pointed to by an indirect (non-L0) block which should be
prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_per_txg_dirty_frees_percent Ns = Ns Sy 30 Ns % Pq u64
Control percentage of dirtied indirect blocks from frees allowed into one TXG.
After this threshold is crossed, additional frees will wait until the next TXG.
.Sy 0 No disables this throttle .
.
.It Sy zfs_prefetch_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable predictive prefetch.
Note that it leaves "prescient" prefetch
.Pq for, e.g., Nm zfs Cm send
intact.
Unlike predictive prefetch, prescient prefetch never issues I/O
that ends up not being needed, so it can't hurt performance.
.
.It Sy zfs_qat_checksum_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for SHA256 checksums.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_compress_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for gzip compression.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_encrypt_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for AES-GCM encryption.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Bytes to read per chunk.
.
.It Sy zfs_read_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest reads will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
.
.It Sy zfs_read_history_hits Ns = Ns Sy 0 Ns | Ns 1 Pq int
Include cache hits in read history.
.
.It Sy zfs_rebuild_max_segment Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Maximum read segment size to issue when sequentially resilvering a
top-level vdev.
.
.It Sy zfs_rebuild_scrub_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub when the last active sequential resilver
completes in order to verify the checksums of all blocks which have been
resilvered.
This is enabled by default and strongly recommended.
.
.It Sy zfs_rebuild_vdev_limit Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq u64
Maximum amount of I/O that can be concurrently issued for a sequential
resilver per leaf device, given in bytes.
.
.It Sy zfs_reconstruct_indirect_combinations_max Ns = Ns Sy 4096 Pq int
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all.
Instead, try at most this many randomly selected
combinations each time the block is accessed.
This allows all segment copies to participate fairly
in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.
.It Sy zfs_recover Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to attempt to recover from fatal errors.
This should only be used as a last resort,
as it typically results in leaked space, or worse.
.
.It Sy zfs_removal_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore hard I/O errors during device removal.
When set, if a device encounters a hard I/O error during the removal process
the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is hence not recommended.
This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
This is used by the test suite so that it can ensure that certain actions
happen while in the middle of a removal.
.
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The largest contiguous segment that we will attempt to allocate when removing
a device.
If there is a performance problem with attempting to allocate large blocks,
consider decreasing this.
The default value is also the maximum.
.
.It Sy zfs_resilver_disable_defer Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore the
.Sy resilver_defer
feature, causing an operation that would start a resilver to
immediately restart the one in progress.
.
.It Sy zfs_resilver_defer_percent Ns = Ns Sy 10 Ns % Pq uint
If the ongoing resilver progress is below this threshold, a new resilver will
restart from scratch instead of being deferred after the current one finishes,
even if the
.Sy resilver_defer
feature is enabled.
.
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq uint
Resilvers are processed by the sync thread.
While resilvering, it will spend at least this much time
working on a resilver between TXG flushes.
.
.It Sy zfs_scan_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
If set, remove the DTL (dirty time list) upon completion of a pool scan (scrub),
even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_after_expand Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub after a RAIDZ expansion completes
in order to verify the checksums of all blocks which have been
copied during the expansion.
This is enabled by default and strongly recommended.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq uint
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
working on a scrub between TXG flushes.
.
.It Sy zfs_scrub_error_blocks_per_txg Ns = Ns Sy 4096 Pq uint
Error blocks to be scrubbed in one txg.
.
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hour Pc Pq uint
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/O to disk.
The frequency of this flushing is determined by this tunable.
.
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq uint
This tunable affects how scrub and resilver I/O segments are ordered.
A higher number indicates that we care more about how filled in a segment is,
while a lower number indicates we care more about the size of the extent without
considering the gaps within a segment.
This value is only tunable upon module insertion.
Changing the value afterwards will have no effect on scrub or resilver
performance.
.
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq uint
Determines the order that data will be verified while scrubbing or resilvering:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
Data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing
.Pq see Sy zfs_scan_mem_lim_fact .
This may improve scrub performance if the pool's data is very fragmented.
.It Sy 2
The largest mostly-contiguous chunk of found data will be verified first.
By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size.
.It Sy 0
.No Use strategy Sy 1 No during normal verification
.No and strategy Sy 2 No while taking a checkpoint .
.El
.
.It Sy zfs_scan_legacy Ns = Ns Sy 0 Ns | Ns 1 Pq int
If unset, indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O.
Otherwise indicates that the legacy algorithm will be used,
where I/O is initiated as soon as it is discovered.
Unsetting will not affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_max_ext_gap Ns = Ns Sy 2097152 Ns B Po 2 MiB Pc Pq int
Sets the largest gap in bytes between scrub/resilver I/O operations
that will still be considered sequential for sorting purposes.
Changing this value will not
affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O.
This is done until we get below the soft limit.
.
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm.
When we cross this limit from below no action is taken.
When we cross this limit from above it is because we are issuing verification
I/O.
In this case (unless the metadata scan is done) we stop issuing verification I/O
and start scanning metadata again until we get to the hard limit.
.
.It Sy zfs_scan_report_txgs Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When reporting resilver throughput and estimated completion time use the
performance observed over roughly the last
.Sy zfs_scan_report_txgs
TXGs.
When set to zero performance is calculated over the time between checkpoints.
.
.It Sy zfs_scan_strict_mem_lim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enforce tight memory limits on pool scans when a sequential scan is in progress.
When disabled, the memory limit may be exceeded by fast disks.
.
.It Sy zfs_scan_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
Freezes a scrub/resilver in progress without actually pausing it.
Intended for testing/debugging.
.
.It Sy zfs_scan_vdev_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum amount of data that can be concurrently issued at once for scrubs and
resilvers per leaf device, given in bytes.
.
.It Sy zfs_send_corrupt_data Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow sending of corrupt data (ignore read/checksum errors when sending).
.
.It Sy zfs_send_unmodified_spill_blocks Ns = Ns Sy 1 Ns | Ns 0 Pq int
Include unmodified spill blocks in the send stream.
Under certain circumstances, previous versions of ZFS could incorrectly
remove the spill block from an existing object.
Including unmodified copies of the spill blocks creates a backwards-compatible
stream which will recreate a spill block if it was incorrectly removed.
.
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
internal queues.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum number of bytes allowed in
.Nm zfs Cm send Ns 's
internal queues.
.
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
prefetch queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed that will be prefetched by
.Nm zfs Cm send .
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm receive
queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed in the
.Nm zfs Cm receive
queue.
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum amount of data, in bytes, that
.Nm zfs Cm receive
will write in one DMU transaction.
This is the uncompressed size, even when receiving a compressed send stream.
This setting will not reduce the write size below a single block.
Capped at a maximum of
.Sy 32 MiB .
.
.It Sy zfs_recv_best_effort_corrective Ns = Ns Sy 0 Pq int
When this variable is set to non-zero a corrective receive:
.Bl -enum -compact -offset 4n -width "1."
.It
Does not enforce the restriction of source & destination snapshot GUIDs
matching.
.It
If there is an error during healing, the healing receive is not
terminated; instead it moves on to the next record.
.El
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Setting this variable overrides the default logic for estimating block
sizes when doing a
.Nm zfs Cm send .
The default heuristic is that the average block size
will be the current recordsize.
Override this value if most data in your dataset is not of that size
and you require accurate zfs send size estimates.
.
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq uint
Flushing of data to disk is done in passes.
Defer frees starting in this pass.
.
.It Sy zfs_spa_discard_memory_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum memory used for prefetching a checkpoint's space map on each
vdev while discarding the checkpoint.
.
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq uint
Only allow small data blocks to be allocated on the special and dedup vdev
types when the available free space percentage on these vdevs exceeds this
value.
This ensures reserved space is available for pool metadata as the
special vdevs approach capacity.
.
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq uint
Starting in this sync pass, disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes,
so this has no effect.
.Pp
The original intent was that disabling compression would help the sync passes
to converge.
However, in practice, disabling compression increases
the average number of sync passes; because when we turn compression off,
many blocks' size will change, and thus we have to re-allocate
(not overwrite) them.
It also increases the number of
.Em 128 KiB
allocations (e.g. for indirect blocks and spacemaps)
because these will not be compressed.
The
.Em 128 KiB
allocations are especially detrimental to performance
on highly fragmented systems, which may have very few free segments of this
size,
and may need to load new metaslabs to satisfy these allocations.
.
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq uint
Rewrite new block pointers starting in this pass.
.
.It Sy zfs_trim_extent_bytes_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Maximum size of TRIM command.
Larger ranges will be split into chunks no larger than this value before
issuing.
.
.It Sy zfs_trim_extent_bytes_min Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Minimum size of TRIM commands.
TRIM ranges smaller than this will be skipped,
unless they're part of a larger range which was chunked.
This is done because it's common for these small TRIMs
to negatively impact overall performance.
.
.It Sy zfs_trim_metaslab_skip Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Skip uninitialized metaslabs during the TRIM process.
This option is useful for pools constructed from large thinly-provisioned
devices
where TRIM operations are slow.
As a pool ages, an increasing fraction of the pool's metaslabs
will be initialized, progressively degrading the usefulness of this option.
This setting is stored when starting a manual TRIM and will
persist for the duration of the requested TRIM.
.
.It Sy zfs_trim_queue_limit Ns = Ns Sy 10 Pq uint
Maximum number of queued TRIMs outstanding per leaf vdev.
The number of concurrent TRIM commands issued to the device is controlled by
.Sy zfs_vdev_trim_min_active No and Sy zfs_vdev_trim_max_active .
.
.It Sy zfs_trim_txg_batch Ns = Ns Sy 32 Pq uint
The number of transaction groups' worth of frees which should be aggregated
before TRIM operations are issued to the device.
This setting represents a trade-off between issuing larger,
more efficient TRIM operations and the delay
before the recently trimmed space is available for use by the device.
.Pp
Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory
usage.
Decreasing this value will have the opposite effect.
The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 100 Pq uint
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq uint
Flush dirty data to disk at least every this many seconds (maximum TXG
duration).
.
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
Max vdev I/O aggregation size.
.
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
Max vdev I/O aggregation size for non-rotating media.
.
.It Sy zfs_vdev_mirror_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
immediately follows its predecessor on rotational vdevs.
.
.It Sy zfs_vdev_mirror_rotating_seek_inc Ns = Ns Sy 5 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this that are not immediately following the previous operation
are incremented by half.
.
.It Sy zfs_vdev_mirror_rotating_seek_offset Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum distance for the last queued I/O operation in which
the balancing algorithm considers an operation to have locality.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_mirror_non_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/O operations do not immediately follow one another.
.
.It Sy zfs_vdev_mirror_non_rotating_seek_inc Ns = Ns Sy 1 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this that are not immediately following the previous operation
are incremented by half.
.
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Aggregate read I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq uint
Aggregate write I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_raidz_impl Ns = Ns Sy fastest Pq string
Select the raidz parity implementation to use.
.Pp
Variants that don't depend on CPU-specific features
may be selected on module load, as they are supported on all systems.
The remaining options may only be set after the module is loaded,
as they are available only if the implementations are compiled in
and supported on the running system.
.Pp
Once the module is loaded,
.Pa /sys/module/zfs/parameters/zfs_vdev_raidz_impl
will show the available options,
with the currently selected one enclosed in square brackets.
.Pp
.TS
lb l l .
fastest selected by built-in benchmark
original original implementation
scalar scalar implementation
sse2 SSE2 instruction set 64-bit x86
ssse3 SSSE3 instruction set 64-bit x86
avx2 AVX2 instruction set 64-bit x86
avx512f AVX512F instruction set 64-bit x86
avx512bw AVX512F & AVX512BW instruction sets 64-bit x86
aarch64_neon NEON Aarch64/64-bit ARMv8
aarch64_neonx2 NEON with more unrolling Aarch64/64-bit ARMv8
powerpc_altivec Altivec PowerPC
.TE
.
.It Sy zfs_vdev_scheduler Pq charp
.Sy DEPRECATED .
Prints warning to kernel log for compatibility.
.
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq uint
Max event queue length.
Events in the queue can be viewed with
.Xr zpool-events 8 .
.
.It Sy zfs_zevent_retain_max Ns = Ns Sy 2000 Pq int
Maximum recent zevent records to retain for duplicate checking.
Setting this to
.Sy 0
disables duplicate detection.
.
.It Sy zfs_zevent_retain_expire_secs Ns = Ns Sy 900 Ns s Po 15 min Pc Pq int
Lifespan for a recent ereport that was retained for duplicate checking.
.
.It Sy zfs_zil_clean_taskq_maxalloc Ns = Ns Sy 1048576 Pq int
The maximum number of taskq entries that are allowed to be cached.
When this limit is exceeded, transaction records (itxs)
will be cleaned synchronously.
.
.It Sy zfs_zil_clean_taskq_minalloc Ns = Ns Sy 1024 Pq int
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.
.It Sy zfs_zil_clean_taskq_nthr_pct Ns = Ns Sy 100 Ns % Pq int
This controls the number of threads used by
.Sy dp_zil_clean_taskq .
The default value of
.Sy 100%
will create a maximum of one thread per CPU.
.
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
This sets the maximum block size used by the ZIL.
On very fragmented pools, lowering this
.Pq typically to Sy 36 KiB
can improve performance.
.
.It Sy zil_maxcopied Ns = Ns Sy 7680 Ns B Po 7.5 KiB Pc Pq uint
This sets the maximum number of write bytes logged via WR_COPIED.
It tunes a tradeoff between additional memory copying and possibly worse log
space efficiency versus additional range lock/unlock overhead.
.
.It Sy zil_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable the cache flush commands that are normally sent to disk by
the ZIL after an LWB write has completed.
Setting this will cause ZIL corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zil_replay_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable intent logging replay.
Can be disabled for recovery from corrupted ZIL.
.
.It Sy zil_slog_bulk Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq u64
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
.
.It Sy zfs_zil_saxattr Ns = Ns Sy 1 Ns | Ns 0 Pq int
Setting this tunable to zero disables ZIL logging of new
.Sy xattr Ns = Ns Sy sa
records if the
.Sy org.openzfs:zilsaxattr
feature is enabled on the pool.
This would only be necessary to work around bugs in the ZIL logging or replay
code for this record type.
The tunable has no effect if the feature is disabled.
.
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq uint
Usually, one metaslab from each normal-class vdev is dedicated for use by
the ZIL to log synchronous writes.
However, if there are fewer than
.Sy zfs_embedded_slog_min_ms
metaslabs in the vdev, this functionality is disabled.
This ensures that we don't set aside an unreasonable amount of space for the
ZIL.
.
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq uint
Whether the heuristic for detecting incompressible data with zstd levels >= 3,
using LZ4 and zstd-1 passes, is enabled.
.
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq uint
Minimum uncompressed size (inclusive) of a record before the early abort
heuristic will be attempted.
.
.It Sy zio_deadman_log_all Ns = Ns Sy 0 Ns | Ns 1 Pq int
If non-zero, the zio deadman will produce debugging messages
.Pq see Sy zfs_dbgmsg_enable
for all zios, rather than only for leaf zios possessing a vdev.
This is meant to be used by developers to gain
diagnostic information for hang conditions which don't involve a mutex
or other locking primitive: typically conditions in which a thread in
the zio pipeline is looping indefinitely.
.
.It Sy zio_slow_io_ms Ns = Ns Sy 30000 Ns ms Po 30 s Pc Pq int
When an I/O operation takes more than this much time to complete,
it's marked as slow.
Each slow operation causes a delay zevent.
Slow I/O counters can be seen with
.Nm zpool Cm status Fl s .
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
.
.It Sy zfs_xattr_compat Ns = Ns Sy 0 Ns | Ns 1 Pq int
Control the naming scheme used when setting new xattrs in the user namespace.
If
.Sy 0
.Pq the default on Linux ,
user namespace xattr names are prefixed with the namespace, to be backwards
compatible with previous versions of ZFS on Linux.
If
.Sy 1
.Pq the default on Fx ,
user namespace xattr names are not prefixed, to be backwards compatible with
previous versions of ZFS on illumos and
.Fx .
.Pp
Either naming scheme can be read on this and future versions of ZFS, regardless
of this tunable, but legacy versions of ZFS on illumos or
.Fx
are unable to read user namespace xattrs written in the Linux format, and
legacy versions of ZFS on Linux are unable to read user namespace xattrs written
in the legacy ZFS format.
.Pp
An existing xattr with the alternate naming scheme is removed when overwriting
the xattr so as to not accumulate duplicates.
.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression, encryption,
checksum and parity calculations.
A fractional number of CPUs will be rounded down.
.Pp
The default value of
.Sy 80%
was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance,
especially when slower compression and/or checksumming is enabled.
The set value only applies to pools imported or created after the change.
.
.It Sy zio_taskq_batch_tpq Ns = Ns Sy 0 Pq uint
Number of worker threads per taskq.
Higher values improve I/O ordering and CPU utilization,
while lower values reduce lock contention.
The set value only applies to pools imported or created after the change.
.Pp
If
.Sy 0 ,
generate a system-dependent value close to 6 threads per taskq.
The set value only applies to pools imported or created after the change.
.
.It Sy zio_taskq_write_tpq Ns = Ns Sy 16 Pq uint
Determines the minimum number of threads per write issue taskq.
Higher values improve CPU utilization at high throughput,
while lower values reduce taskq lock contention at high IOPS.
The set value only applies to pools imported or created after the change.
.
.It Sy zio_taskq_read Ns = Ns Sy fixed,1,8 null scale null Pq charp
Set the queue and thread configuration for the IO read queues.
This is an advanced debugging parameter.
Don't change this unless you understand what it does.
The set values only apply to pools imported or created after the change.
.
.It Sy zio_taskq_write Ns = Ns Sy sync null scale null Pq charp
Set the queue and thread configuration for the IO write queues.
This is an advanced debugging parameter.
Don't change this unless you understand what it does.
The set values only apply to pools imported or created after the change.
.
.It Sy zvol_inhibit_dev Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Do not create zvol device nodes.
This may slightly improve startup time on
systems with a very large number of zvols.
.
.It Sy zvol_major Ns = Ns Sy 230 Pq uint
Major number for zvol block devices.
.
.It Sy zvol_max_discard_blocks Ns = Ns Sy 16384 Pq long
Discard (TRIM) operations done on zvols will be done in batches of this
many blocks, where block size is determined by the
.Sy volblocksize
property of a zvol.
.
.It Sy zvol_prefetch_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
When adding a zvol to the system, prefetch this many bytes
from the start and end of the volume.
Prefetching these regions of the volume is desirable,
because they are likely to be accessed immediately by
.Xr blkid 8
or the kernel partitioner.
.
.It Sy zvol_request_sync Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When processing I/O requests for a zvol, submit them synchronously.
This effectively limits the queue depth to
.Em 1
for each I/O submitter.
When unset, requests are handled asynchronously by a thread pool.
The number of requests which can be handled concurrently is controlled by
.Sy zvol_threads .
.Sy zvol_request_sync
is ignored when running on a kernel that supports block multiqueue
.Pq Li blk-mq .
.
.It Sy zvol_num_taskqs Ns = Ns Sy 0 Pq uint
Number of zvol taskqs.
If
.Sy 0
(the default) then scaling is done internally to prefer 6 threads per taskq.
This only applies on Linux.
.
.It Sy zvol_threads Ns = Ns Sy 0 Pq uint
The number of system-wide threads to use for processing zvol block IOs.
If
.Sy 0
(the default) then internally set
.Sy zvol_threads
to the number of CPUs present or 32 (whichever is greater).
.
.It Sy zvol_blk_mq_threads Ns = Ns Sy 0 Pq uint
The number of threads per zvol to use for queuing IO requests.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
If
.Sy 0
(the default) then internally set
.Sy zvol_blk_mq_threads
to the number of CPUs present.
.
.It Sy zvol_use_blk_mq Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Set to
.Sy 1
to use the
.Li blk-mq
API for zvols.
Set to
.Sy 0
(the default) to use the legacy zvol APIs.
This setting can give better or worse zvol performance depending on
the workload.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
.
.It Sy zvol_blk_mq_blocks_per_thread Ns = Ns Sy 8 Pq uint
If
.Sy zvol_use_blk_mq
is enabled, then process this number of
.Sy volblocksize Ns -sized blocks per zvol thread.
This tunable can be used to favor better performance for zvol reads (lower
values) or writes (higher values).
If set to
.Sy 0 ,
then the zvol layer will process the maximum number of blocks
per thread that it can.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
.
.It Sy zvol_blk_mq_queue_depth Ns = Ns Sy 0 Pq uint
The queue_depth value for the zvol
.Li blk-mq
interface.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
If
.Sy 0
(the default) then use the kernel's default queue depth.
Values are clamped to the kernel's
.Dv BLKDEV_MIN_RQ
and
.Dv BLKDEV_MAX_RQ Ns / Ns Dv BLKDEV_DEFAULT_RQ
limits.
.
.It Sy zvol_volmode Ns = Ns Sy 1 Pq uint
Defines the behaviour of zvol block devices when
.Sy volmode Ns = Ns Sy default :
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
.No equivalent to Sy full
.It Sy 2
.No equivalent to Sy dev
.It Sy 3
.No equivalent to Sy none
.El
.
.It Sy zvol_enforce_quotas Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Enable strict ZVOL quota enforcement.
The strict quota enforcement may have a performance impact.
.El
.
.Sh ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs in order to satisfy and complete
outstanding I/O requests.
The scheduler determines when and in what order those operations are issued.
The scheduler divides operations into five I/O classes,
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver.
Each queue defines the minimum and maximum number of concurrent operations
that may be issued to the device.
In addition, the device has an aggregate maximum,
.Sy zfs_vdev_max_active .
Note that the sum of the per-queue minima must not exceed the aggregate maximum.
If the sum of the per-queue maxima exceeds the aggregate maximum,
then the number of active operations may reach
.Sy zfs_vdev_max_active ,
in which case no further operations will be issued,
regardless of whether all per-queue minima have been met.
.Pp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers.
Furthermore, physical devices typically have a limit
at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.Pp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied.
Once all are satisfied and the aggregate maximum has not been hit,
the scheduler looks for classes whose maximum has not been satisfied.
Iteration through the I/O classes is done in the order specified above.
No further operations are issued
if the aggregate maximum number of concurrent operations has been hit,
or if there are no operations queued for an I/O class that has not hit its
maximum.
Every time an I/O operation is queued or an operation completes,
the scheduler looks for new operations to issue.
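.Pp
The following is a minimal user-space sketch of that selection order,
not the in-kernel implementation; the structure layout, field names, and
queue-depth bookkeeping are assumptions made only for this illustration.
.Bd -literal
/*
 * Illustrative sketch of the class-selection order described above.
 * Classes are assumed to be stored in priority order.
 */
#include <stddef.h>

typedef struct io_class {
    const char *name;   /* e.g. "sync read" */
    int min_active;     /* per-class minimum concurrent operations */
    int max_active;     /* per-class maximum concurrent operations */
    int active;         /* operations currently issued to the device */
    int queued;         /* operations waiting to be issued */
} io_class_t;

/* Return the index of the class to issue from next, or -1 for none. */
static int
pick_next_class(const io_class_t *c, size_t nclasses, int vdev_active,
    int vdev_max_active)
{
    size_t i;

    if (vdev_active >= vdev_max_active)
        return (-1);    /* aggregate maximum reached */

    /* First pass: classes whose minimum has not been satisfied. */
    for (i = 0; i < nclasses; i++)
        if (c[i].queued > 0 && c[i].active < c[i].min_active)
            return ((int)i);

    /* Second pass: classes still below their per-class maximum. */
    for (i = 0; i < nclasses; i++)
        if (c[i].queued > 0 && c[i].active < c[i].max_active)
            return ((int)i);

    return (-1);    /* nothing eligible to issue right now */
}
.Ed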
.Pp
In general, smaller
.Sy max_active Ns s
will lead to lower latency of synchronous operations.
Larger
.Sy max_active Ns s
may lead to higher overall throughput, depending on underlying storage.
.Pp
The ratio of the queues'
.Sy max_active Ns s
determines the balance of performance between reads, writes, and scrubs.
For example, increasing
.Sy zfs_vdev_scrub_max_active
will cause the scrub or resilver to complete more quickly,
but reads and writes to have higher latency and lower throughput.
.Pp
All I/O classes have a fixed maximum number of outstanding operations,
except for the async write class.
Asynchronous writes represent the data that is committed to stable storage
during the syncing stage for transaction groups.
Transaction groups enter the syncing state periodically,
so the number of queued async writes will quickly burst up
and then bleed down to zero.
Rather than servicing them as quickly as possible,
the I/O scheduler changes the maximum number of active async write operations
according to the amount of dirty data in the pool.
Since both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of simultaneous operations also stabilizes the
response time of operations from other queues, in particular synchronous ones.
In broad strokes, the I/O scheduler will issue more concurrent operations
from the async write queue as there is more dirty data in the pool.
.
.Ss Async Writes
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points:
.Bd -literal
| o---------| <-- \fBzfs_vdev_async_write_max_active\fP
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- \fBzfs_vdev_async_write_min_active\fP
0|_______^______|_________|
0% | | 100% of \fBzfs_dirty_data_max\fP
| |
| `-- \fBzfs_vdev_async_write_active_max_dirty_percent\fP
`--------- \fBzfs_vdev_async_write_active_min_dirty_percent\fP
.Ed
.Pp
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum.
As that threshold is crossed, the number of concurrent operations issued
increases linearly to the maximum at the specified maximum percentage
of the dirty data allowed in the pool.
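.Pp
A minimal sketch of that piece-wise linear ramp is shown below; it is an
illustration only, with the relevant tunables passed in as plain integer
arguments rather than read from the module.
.Bd -literal
/*
 * Sketch of the piece-wise linear function pictured above.
 * dirty/dirty_max stand in for the current dirty data and
 * zfs_dirty_data_max; the *_pct and *_active arguments stand in for
 * the zfs_vdev_async_write_* tunables.  dirty_max must be non-zero.
 */
#include <stdint.h>

static int
async_write_max_active(uint64_t dirty, uint64_t dirty_max,
    int min_pct, int max_pct, int min_active, int max_active)
{
    uint64_t pct = dirty * 100 / dirty_max;

    if (pct <= (uint64_t)min_pct)
        return (min_active);        /* flat minimum */
    if (pct >= (uint64_t)max_pct)
        return (max_active);        /* flat maximum */

    /* Linear interpolation between the two breakpoints. */
    return (min_active + (int)((pct - min_pct) *
        (uint64_t)(max_active - min_active) / (max_pct - min_pct)));
}
.Ed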
.Pp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between
.Sy zfs_vdev_async_write_active_min_dirty_percent
and
.Sy zfs_vdev_async_write_active_max_dirty_percent .
If it exceeds the maximum percentage,
this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle.
In this case, we must further throttle incoming writes,
as described in the next section.
.
.Sh ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.Pp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting.
This way the calculated delay time
is independent of the number of threads concurrently executing transactions.
.Pp
If we are the only waiter, wait relative to when the transaction started,
rather than the current time.
This credits the transaction for "time already served",
e.g. reading indirect blocks.
.Pp
The minimum time for a transaction to take is calculated as
.D1 min_time = min( Ns Sy zfs_delay_scale No \(mu Po Sy dirty No \- Sy min Pc / Po Sy max No \- Sy dirty Pc , 100ms)
.Pp
The delay has two degrees of freedom that can be adjusted via tunables.
The percentage of dirty data at which we start to delay is defined by
.Sy zfs_delay_min_dirty_percent .
This should typically be at or above
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
so that we only start to delay after writing at full speed
has failed to keep up with the incoming write rate.
The scale of the curve is defined by
.Sy zfs_delay_scale .
Roughly speaking, this variable determines the amount of delay at the midpoint
of the curve.
.Bd -literal
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| \fBzfs_delay_scale\fP ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note that, since the delay is added to the outstanding time remaining on the
most recent transaction, it is effectively the inverse of IOPS.
Here, the midpoint of
.Em 500 us
translates to
.Em 2000 IOPS .
The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first three quarters of the curve yield relatively small differences
in the amount of delay.
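.Pp
The following stand-alone sketch works the formula above through with
illustrative numbers
(a 4 GiB dirty limit, delay starting at 60% dirty, and a 500 us scale);
it is not the in-kernel delay code, and the helper and constant names are
invented for this example.
.Bd -literal
#include <stdint.h>

#define DELAY_CAP_NS    100000000ULL    /* the 100 ms cap from the formula */

/*
 * min_time = min(scale * (dirty - min) / (max - dirty), 100 ms)
 *
 * Worked example: with a 4 GiB zfs_dirty_data_max, delays starting at
 * 60% dirty (min = 2.4 GiB) and zfs_delay_scale = 500000 ns, a pool
 * sitting halfway between min and max (3.2 GiB dirty) gets
 * 500000 * 0.8 / 0.8 = 500000 ns (500 us) of delay per write, i.e.
 * roughly 2000 IOPS; at 3.6 GiB dirty the delay triples to 1.5 ms.
 */
static uint64_t
tx_delay_ns(uint64_t dirty, uint64_t dirty_min, uint64_t dirty_max,
    uint64_t delay_scale_ns)
{
    if (dirty <= dirty_min)
        return (0);             /* below the delay threshold */
    if (dirty >= dirty_max)
        return (DELAY_CAP_NS);  /* saturated */

    uint64_t d = delay_scale_ns * (dirty - dirty_min) /
        (dirty_max - dirty);
    return (d < DELAY_CAP_NS ? d : DELAY_CAP_NS);
}
.Ed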
.Pp
The effects can be easier to understand when the amount of delay is
represented on a logarithmic scale:
.Bd -literal
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ \fBzfs_delay_scale\fP ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly.
The goal of a properly tuned system should be to keep the amount of dirty data
out of that range by first ensuring that the appropriate limits are set
for the I/O scheduler to reach optimal throughput on the back-end storage,
and then by changing the value of
.Sy zfs_delay_scale
to increase the steepness of the curve.
diff --git a/sys/contrib/openzfs/man/man8/zfs-destroy.8 b/sys/contrib/openzfs/man/man8/zfs-destroy.8
index 247c561322bf..97596b28444b 100644
--- a/sys/contrib/openzfs/man/man8/zfs-destroy.8
+++ b/sys/contrib/openzfs/man/man8/zfs-destroy.8
@@ -1,226 +1,235 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
.Dd March 16, 2022
.Dt ZFS-DESTROY 8
.Os
.
.Sh NAME
.Nm zfs-destroy
.Nd destroy ZFS dataset, snapshots, or bookmark
.Sh SYNOPSIS
.Nm zfs
.Cm destroy
.Op Fl Rfnprv
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm destroy
.Op Fl Rdnprv
.Ar filesystem Ns | Ns Ar volume Ns @ Ns Ar snap Ns
.Oo % Ns Ar snap Ns Oo , Ns Ar snap Ns Oo % Ns Ar snap Oc Oc Oc Ns …
.Nm zfs
.Cm destroy
.Ar filesystem Ns | Ns Ar volume Ns # Ns Ar bookmark
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm destroy
.Op Fl Rfnprv
.Ar filesystem Ns | Ns Ar volume
.Xc
Destroys the given dataset.
By default, the command unshares any file systems that are currently shared,
unmounts any file systems that are currently mounted, and refuses to destroy a
dataset that has active dependents
.Pq children or clones .
.Bl -tag -width "-R"
.It Fl R
Recursively destroy all dependents, including cloned file systems outside the
target hierarchy.
.It Fl f
Forcibly unmount file systems.
This option has no effect on non-file systems or unmounted file systems.
.It Fl n
Do a dry-run
.Pq Qq No-op
deletion.
No data will be deleted.
This is useful in conjunction with the
.Fl v
or
.Fl p
flags to determine what data would be deleted.
.It Fl p
Print machine-parsable verbose information about the deleted data.
.It Fl r
Recursively destroy all children.
.It Fl v
Print verbose information about the deleted data.
.El
.Pp
Extreme care should be taken when applying either the
.Fl r
or the
.Fl R
options, as they can destroy large portions of a pool and cause unexpected
behavior for mounted file systems in use.
.It Xo
.Nm zfs
.Cm destroy
.Op Fl Rdnprv
.Ar filesystem Ns | Ns Ar volume Ns @ Ns Ar snap Ns
.Oo % Ns Ar snap Ns Oo , Ns Ar snap Ns Oo % Ns Ar snap Oc Oc Oc Ns …
.Xc
-The given snapshots are destroyed immediately if and only if the
+Attempts to destroy the given snapshot(s).
+This will fail if any clones of the snapshot exist or if the snapshot is held.
+In this case, by default,
.Nm zfs Cm destroy
-command without the
+will have no effect and exit with an error.
+If the
.Fl d
-option would have destroyed it.
-Such immediate destruction would occur, for example, if the snapshot had no
-clones and the user-initiated reference count were zero.
+option is applied, the command will instead mark the given snapshot for
+automatic destruction as soon as it becomes eligible.
+While marked for destruction, a snapshot remains visible, and the user may
+create new clones from it and place new holds on it.
.Pp
-If a snapshot does not qualify for immediate destruction, it is marked for
-deferred deletion.
-In this state, it exists as a usable, visible snapshot until both of the
-preconditions listed above are met, at which point it is destroyed.
+The read-only snapshot properties
+.Sy defer_destroy
+and
+.Sy userrefs
+are used by
+.Nm zfs Cm destroy
+to determine eligibility and marked status.
.Pp
An inclusive range of snapshots may be specified by separating the first and
last snapshots with a percent sign.
The first and/or last snapshots may be left blank, in which case the
filesystem's oldest or newest snapshot will be implied.
.Pp
Multiple snapshots
.Pq or ranges of snapshots
of the same filesystem or volume may be specified in a comma-separated list of
snapshots.
Only the snapshot's short name
.Po the part after the
.Sy @
.Pc
should be specified when using a range or comma-separated list to identify
multiple snapshots.
.Bl -tag -width "-R"
.It Fl R
Recursively destroy all clones of these snapshots, including the clones,
snapshots, and children.
If this flag is specified, the
.Fl d
flag will have no effect.
.It Fl d
-Destroy immediately.
-If a snapshot cannot be destroyed now, mark it for deferred destruction.
+Rather than returning an error if the given snapshot is ineligible for immediate
+destruction, mark it for deferred, automatic destruction once it becomes
+eligible.
.It Fl n
Do a dry-run
.Pq Qq No-op
deletion.
No data will be deleted.
This is useful in conjunction with the
.Fl p
or
.Fl v
flags to determine what data would be deleted.
.It Fl p
Print machine-parsable verbose information about the deleted data.
.It Fl r
Destroy
.Pq or mark for deferred deletion
all snapshots with this name in descendent file systems.
.It Fl v
Print verbose information about the deleted data.
.El
.Pp
Extreme care should be taken when applying either the
.Fl r
or the
.Fl R
options, as they can destroy large portions of a pool and cause unexpected
behavior for mounted file systems in use.
.It Xo
.Nm zfs
.Cm destroy
.Ar filesystem Ns | Ns Ar volume Ns # Ns Ar bookmark
.Xc
The given bookmark is destroyed.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 3, 10, 15 from zfs.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Creating and Destroying Multiple Snapshots
The following command creates snapshots named
.Ar yesterday No of Ar pool/home
and all of its descendent file systems.
Each snapshot is mounted on demand in the
.Pa .zfs/snapshot
directory at the root of its file system.
The second command destroys the newly created snapshots.
.Dl # Nm zfs Cm snapshot Fl r Ar pool/home Ns @ Ns Ar yesterday
.Dl # Nm zfs Cm destroy Fl r Ar pool/home Ns @ Ns Ar yesterday
.
.Ss Example 2 : No Promoting a ZFS Clone
The following commands illustrate how to test out changes to a file system, and
then replace the original file system with the changed one, using clones, clone
promotion, and renaming:
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm create Ar pool/project/production
populate /pool/project/production with data
.No # Nm zfs Cm snapshot Ar pool/project/production Ns @ Ns Ar today
.No # Nm zfs Cm clone Ar pool/project/production@today pool/project/beta
make changes to /pool/project/beta and test them
.No # Nm zfs Cm promote Ar pool/project/beta
.No # Nm zfs Cm rename Ar pool/project/production pool/project/legacy
.No # Nm zfs Cm rename Ar pool/project/beta pool/project/production
once the legacy version is no longer needed, it can be destroyed
.No # Nm zfs Cm destroy Ar pool/project/legacy
.Ed
.
.Ss Example 3 : No Performing a Rolling Snapshot
The following example shows how to maintain a history of snapshots with a
consistent naming scheme.
To keep a week's worth of snapshots, the user destroys the oldest snapshot,
renames the remaining snapshots, and then creates a new snapshot, as follows:
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm destroy Fl r Ar pool/users@7daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@6daysago No @ Ns Ar 7daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@5daysago No @ Ns Ar 6daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@4daysago No @ Ns Ar 5daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@3daysago No @ Ns Ar 4daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@2daysago No @ Ns Ar 3daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@yesterday No @ Ns Ar 2daysago
.No # Nm zfs Cm rename Fl r Ar pool/users@today No @ Ns Ar yesterday
.No # Nm zfs Cm snapshot Fl r Ar pool/users Ns @ Ns Ar today
.Ed
.
.Sh SEE ALSO
.Xr zfs-create 8 ,
-.Xr zfs-hold 8
+.Xr zfs-hold 8 ,
+.Xr zfsprops 8
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index fc14d5cb535e..5190afc506f9 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -1,507 +1,508 @@
# When integrated in to a monolithic kernel the spl module must appear
# first. This ensures its module initialization function is run before
# any of the other module initialization functions which depend on it.
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
icp_include = @abs_srcdir@/icp/include
zstd_include = @abs_srcdir@/zstd/include
ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h
ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include
src = @abs_srcdir@
obj = @abs_builddir@
else
zfs_include = $(srctree)/include/zfs
icp_include = $(src)/icp/include
zstd_include = $(src)/zstd/include
ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h
endif
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs
ZFS_MODULE_CFLAGS += -I$(zfs_include)
ZFS_MODULE_CPPFLAGS += -D_KERNEL
ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@
# KASAN enables -Werror=frame-larger-than=1024, which
# breaks oh so many parts of our build.
ifeq ($(CONFIG_KASAN),y)
ZFS_MODULE_CFLAGS += -Wno-error=frame-larger-than=
endif
# Generated binary search code is particularly bad with this optimization.
# Oddly, range_tree.c is not affected when unrolling is not done and dsl_scan.c
# is not affected when unrolling is done.
# Disable it until the following upstream issue is resolved:
# https://github.com/llvm/llvm-project/issues/62790
ifeq ($(CONFIG_X86),y)
ifeq ($(CONFIG_CC_IS_CLANG),y)
CFLAGS_zfs/dsl_scan.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/metaslab.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/range_tree.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/zap_micro.o += -mllvm -x86-cmov-converter=false
endif
endif
ifneq ($(KBUILD_EXTMOD),)
@CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include
@CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@
endif
asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ifeq ($(CONFIG_ARM64),y)
CFLAGS_REMOVE_zcommon/zfs_fletcher_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neonx2.o += -mgeneral-regs-only
endif
# Suppress unused-value warnings in sparc64 architecture headers
ccflags-$(CONFIG_SPARC64) += -Wno-unused-value
obj-$(CONFIG_ZFS) := spl.o zfs.o
SPL_OBJS := \
spl-atomic.o \
spl-condvar.o \
spl-cred.o \
spl-err.o \
spl-generic.o \
spl-kmem-cache.o \
spl-kmem.o \
spl-kstat.o \
spl-proc.o \
spl-procfs-list.o \
spl-shrinker.o \
spl-taskq.o \
spl-thread.o \
spl-trace.o \
spl-tsd.o \
spl-vmem.o \
spl-xdr.o \
spl-zlib.o \
spl-zone.o
spl-objs += $(addprefix os/linux/spl/,$(SPL_OBJS))
zfs-objs += avl/avl.o
ICP_OBJS := \
algs/aes/aes_impl.o \
algs/aes/aes_impl_generic.o \
algs/aes/aes_modes.o \
algs/blake3/blake3.o \
algs/blake3/blake3_generic.o \
algs/blake3/blake3_impl.o \
algs/edonr/edonr.o \
algs/modes/ccm.o \
algs/modes/gcm.o \
algs/modes/gcm_generic.o \
algs/modes/modes.o \
algs/sha2/sha2_generic.o \
algs/sha2/sha256_impl.o \
algs/sha2/sha512_impl.o \
algs/skein/skein.o \
algs/skein/skein_block.o \
algs/skein/skein_iv.o \
api/kcf_cipher.o \
api/kcf_ctxops.o \
api/kcf_mac.o \
core/kcf_callprov.o \
core/kcf_mech_tabs.o \
core/kcf_prov_lib.o \
core/kcf_prov_tabs.o \
core/kcf_sched.o \
illumos-crypto.o \
io/aes.o \
io/sha2_mod.o \
spi/kcf_spi.o
ICP_OBJS_X86_64 := \
asm-x86_64/aes/aes_aesni.o \
asm-x86_64/aes/aes_amd64.o \
asm-x86_64/aes/aeskey.o \
asm-x86_64/blake3/blake3_avx2.o \
asm-x86_64/blake3/blake3_avx512.o \
asm-x86_64/blake3/blake3_sse2.o \
asm-x86_64/blake3/blake3_sse41.o \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
ICP_OBJS_X86 := \
algs/aes/aes_impl_aesni.o \
algs/aes/aes_impl_x86-64.o \
algs/modes/gcm_pclmulqdq.o
ICP_OBJS_ARM := \
asm-arm/sha2/sha256-armv7.o \
asm-arm/sha2/sha512-armv7.o
ICP_OBJS_ARM64 := \
asm-aarch64/blake3/b3_aarch64_sse2.o \
asm-aarch64/blake3/b3_aarch64_sse41.o \
asm-aarch64/sha2/sha256-armv8.o \
asm-aarch64/sha2/sha512-armv8.o
ICP_OBJS_PPC_PPC64 := \
asm-ppc64/blake3/b3_ppc64le_sse2.o \
asm-ppc64/blake3/b3_ppc64le_sse41.o \
asm-ppc64/sha2/sha256-p8.o \
asm-ppc64/sha2/sha512-p8.o \
asm-ppc64/sha2/sha256-ppc.o \
asm-ppc64/sha2/sha512-ppc.o
zfs-objs += $(addprefix icp/,$(ICP_OBJS))
zfs-$(CONFIG_X86) += $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_X86_64) += $(addprefix icp/,$(ICP_OBJS_X86_64))
zfs-$(CONFIG_ARM) += $(addprefix icp/,$(ICP_OBJS_ARM))
zfs-$(CONFIG_ARM64) += $(addprefix icp/,$(ICP_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : asflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
# Suppress objtool "return with modified stack frame" warnings.
OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y
# Suppress objtool "unsupported stack pointer realignment" warnings.
# See #6950 for the reasoning.
OBJECT_FILES_NON_STANDARD_sha256-x86_64.o := y
OBJECT_FILES_NON_STANDARD_sha512-x86_64.o := y
LUA_OBJS := \
lapi.o \
lauxlib.o \
lbaselib.o \
lcode.o \
lcompat.o \
lcorolib.o \
lctype.o \
ldebug.o \
ldo.o \
lfunc.o \
lgc.o \
llex.o \
lmem.o \
lobject.o \
lopcodes.o \
lparser.o \
lstate.o \
lstring.o \
lstrlib.o \
ltable.o \
ltablib.o \
ltm.o \
lvm.o \
lzio.o \
setjmp/setjmp.o
zfs-objs += $(addprefix lua/,$(LUA_OBJS))
NVPAIR_OBJS := \
fnvpair.o \
nvpair.o \
nvpair_alloc_fixed.o \
nvpair_alloc_spl.o
zfs-objs += $(addprefix nvpair/,$(NVPAIR_OBJS))
UNICODE_OBJS := \
u8_textprep.o
zfs-objs += $(addprefix unicode/,$(UNICODE_OBJS))
ZCOMMON_OBJS := \
cityhash.o \
simd_stat.o \
zfeature_common.o \
zfs_comutil.o \
zfs_deleg.o \
zfs_fletcher.o \
zfs_fletcher_superscalar.o \
zfs_fletcher_superscalar4.o \
zfs_namecheck.o \
zfs_prop.o \
zfs_valstr.o \
zpool_prop.o \
zprop_common.o
ZCOMMON_OBJS_X86 := \
zfs_fletcher_avx512.o \
zfs_fletcher_intel.o \
zfs_fletcher_sse.o
ZCOMMON_OBJS_ARM64 := \
zfs_fletcher_aarch64_neon.o
zfs-objs += $(addprefix zcommon/,$(ZCOMMON_OBJS))
zfs-$(CONFIG_X86) += $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zcommon/,$(ZCOMMON_OBJS_ARM64))
# Zstd uses -O3 by default, so we should follow
ZFS_ZSTD_FLAGS := -O3
# -fno-tree-vectorize gets set for gcc in zstd/common/compiler.h
# Set it for other compilers, too.
ZFS_ZSTD_FLAGS += -fno-tree-vectorize
# SSE register return with SSE disabled if -march=znverX is passed
ZFS_ZSTD_FLAGS += -U__BMI__
# Quiet warnings about frame size due to unused code in unmodified zstd lib
ZFS_ZSTD_FLAGS += -Wframe-larger-than=20480
ZSTD_OBJS := \
zfs_zstd.o \
zstd_sparc.o
ZSTD_UPSTREAM_OBJS := \
lib/common/entropy_common.o \
lib/common/error_private.o \
lib/common/fse_decompress.o \
lib/common/pool.o \
lib/common/zstd_common.o \
lib/compress/fse_compress.o \
lib/compress/hist.o \
lib/compress/huf_compress.o \
lib/compress/zstd_compress.o \
lib/compress/zstd_compress_literals.o \
lib/compress/zstd_compress_sequences.o \
lib/compress/zstd_compress_superblock.o \
lib/compress/zstd_double_fast.o \
lib/compress/zstd_fast.o \
lib/compress/zstd_lazy.o \
lib/compress/zstd_ldm.o \
lib/compress/zstd_opt.o \
lib/decompress/huf_decompress.o \
lib/decompress/zstd_ddict.o \
lib/decompress/zstd_decompress.o \
lib/decompress/zstd_decompress_block.o
zfs-objs += $(addprefix zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS))
# Disable aarch64 neon SIMD instructions for kernel mode
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -I$(zstd_include) $(ZFS_ZSTD_FLAGS)
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : asflags-y += -I$(zstd_include)
$(addprefix $(obj)/zstd/,$(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -include $(zstd_include)/aarch64_compat.h -include $(zstd_include)/zstd_compat_wrapper.h -Wp,-w
$(obj)/zstd/zfs_zstd.o : ccflags-y += -include $(zstd_include)/zstd_compat_wrapper.h
ZFS_OBJS := \
abd.o \
aggsum.o \
arc.o \
blake3_zfs.o \
blkptr.o \
bplist.o \
bpobj.o \
bptree.o \
bqueue.o \
brt.o \
btree.o \
dataset_kstats.o \
dbuf.o \
dbuf_stats.o \
ddt.o \
ddt_log.o \
ddt_stats.o \
ddt_zap.o \
dmu.o \
dmu_direct.o \
dmu_diff.o \
dmu_object.o \
dmu_objset.o \
dmu_recv.o \
dmu_redact.o \
dmu_send.o \
dmu_traverse.o \
dmu_tx.o \
dmu_zfetch.o \
dnode.o \
dnode_sync.o \
dsl_bookmark.o \
dsl_crypt.o \
dsl_dataset.o \
dsl_deadlist.o \
dsl_deleg.o \
dsl_destroy.o \
dsl_dir.o \
dsl_pool.o \
dsl_prop.o \
dsl_scan.o \
dsl_synctask.o \
dsl_userhold.o \
edonr_zfs.o \
fm.o \
gzip.o \
hkdf.o \
lz4.o \
lz4_zfs.o \
lzjb.o \
metaslab.o \
mmp.o \
multilist.o \
objlist.o \
pathname.o \
range_tree.o \
refcount.o \
rrwlock.o \
sa.o \
sha2_zfs.o \
skein_zfs.o \
spa.o \
spa_checkpoint.o \
spa_config.o \
spa_errlog.o \
spa_history.o \
spa_log_spacemap.o \
spa_misc.o \
spa_stats.o \
space_map.o \
space_reftree.o \
txg.o \
uberblock.o \
unique.o \
vdev.o \
vdev_draid.o \
vdev_draid_rand.o \
vdev_indirect.o \
vdev_indirect_births.o \
vdev_indirect_mapping.o \
vdev_initialize.o \
vdev_label.o \
vdev_mirror.o \
vdev_missing.o \
vdev_queue.o \
vdev_raidz.o \
vdev_raidz_math.o \
vdev_raidz_math_scalar.o \
vdev_rebuild.o \
vdev_removal.o \
vdev_root.o \
vdev_trim.o \
zap.o \
zap_leaf.o \
zap_micro.o \
zcp.o \
zcp_get.o \
zcp_global.o \
zcp_iter.o \
zcp_set.o \
zcp_synctask.o \
zfeature.o \
zfs_byteswap.o \
zfs_chksum.o \
zfs_fm.o \
zfs_fuid.o \
zfs_impl.o \
zfs_ioctl.o \
zfs_log.o \
zfs_onexit.o \
zfs_quota.o \
zfs_ratelimit.o \
zfs_replay.o \
zfs_rlock.o \
zfs_sa.o \
zfs_vnops.o \
zfs_znode.o \
zil.o \
zio.o \
zio_checksum.o \
zio_compress.o \
zio_inject.o \
zle.o \
zrlock.o \
zthr.o \
zvol.o
ZFS_OBJS_OS := \
abd_os.o \
arc_os.o \
mmp_os.o \
policy.o \
qat.o \
qat_compress.o \
qat_crypt.o \
spa_misc_os.o \
trace.o \
vdev_disk.o \
vdev_file.o \
+ vdev_raidz.o \
vdev_label_os.o \
zfs_acl.o \
zfs_ctldir.o \
zfs_debug.o \
zfs_dir.o \
zfs_file_os.o \
zfs_ioctl_os.o \
zfs_racct.o \
zfs_sysfs.o \
zfs_uio.o \
zfs_vfsops.o \
zfs_vnops_os.o \
zfs_znode_os.o \
zio_crypt.o \
zpl_ctldir.o \
zpl_export.o \
zpl_file.o \
zpl_file_range.o \
zpl_inode.o \
zpl_super.o \
zpl_xattr.o \
zvol_os.o
ZFS_OBJS_X86 := \
vdev_raidz_math_avx2.o \
vdev_raidz_math_avx512bw.o \
vdev_raidz_math_avx512f.o \
vdev_raidz_math_sse2.o \
vdev_raidz_math_ssse3.o
ZFS_OBJS_ARM64 := \
vdev_raidz_math_aarch64_neon.o \
vdev_raidz_math_aarch64_neonx2.o
ZFS_OBJS_PPC_PPC64 := \
vdev_raidz_math_powerpc_altivec.o
zfs-objs += $(addprefix zfs/,$(ZFS_OBJS)) $(addprefix os/linux/zfs/,$(ZFS_OBJS_OS))
zfs-$(CONFIG_X86) += $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zfs/,$(ZFS_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
UBSAN_SANITIZE_zap_leaf.o := n
UBSAN_SANITIZE_zap_micro.o := n
UBSAN_SANITIZE_sa.o := n
UBSAN_SANITIZE_zfs/zap_micro.o := n
UBSAN_SANITIZE_zfs/sa.o := n
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y
ifeq ($(CONFIG_ALTIVEC),y)
$(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec
endif
diff --git a/sys/contrib/openzfs/module/Makefile.in b/sys/contrib/openzfs/module/Makefile.in
index f76e94afa410..a65cbfce1a90 100644
--- a/sys/contrib/openzfs/module/Makefile.in
+++ b/sys/contrib/openzfs/module/Makefile.in
@@ -1,174 +1,174 @@
include Kbuild
INSTALL_MOD_DIR ?= extra
INSTALL_MOD_PATH ?= $(DESTDIR)
all: modules
distclean maintainer-clean: clean
install: modules_install data_install
uninstall: modules_uninstall data_uninstall
check:
.PHONY: all distclean maintainer-clean install uninstall check distdir \
modules modules-Linux modules-FreeBSD modules-unknown \
clean clean-Linux clean-FreeBSD \
modules_install modules_install-Linux modules_install-FreeBSD \
data_install data_install-Linux data_install-FreeBSD \
modules_uninstall modules_uninstall-Linux modules_uninstall-FreeBSD \
data_uninstall data_uninstall-Linux data_uninstall-FreeBSD \
cppcheck cppcheck-Linux cppcheck-FreeBSD
# For FreeBSD, use debug options from ./configure if not overridden.
export WITH_DEBUG ?= @WITH_DEBUG@
export WITH_INVARIANTS ?= @WITH_INVARIANTS@
# Filter out options that FreeBSD make doesn't understand
getflags = ( \
set -- \
$(filter-out --%,$(firstword $(MFLAGS))) \
$(filter -I%,$(MFLAGS)) \
$(filter -j%,$(MFLAGS)); \
fmakeflags=""; \
while getopts :deiI:j:knqrstw flag; do \
case $$flag in \
\?) :;; \
:) if [ $$OPTARG = "j" ]; then \
ncpus=$$(sysctl -n kern.smp.cpus 2>/dev/null || :); \
if [ -n "$$ncpus" ]; then fmakeflags="$$fmakeflags -j$$ncpus"; fi; \
fi;; \
d) fmakeflags="$$fmakeflags -dA";; \
*) fmakeflags="$$fmakeflags -$$flag$$OPTARG";; \
esac; \
done; \
echo $$fmakeflags \
)
FMAKEFLAGS = -C @abs_srcdir@ -f Makefile.bsd $(shell $(getflags))
ifneq (@abs_srcdir@,@abs_builddir@)
FMAKEFLAGS += MAKEOBJDIR=@abs_builddir@
endif
FMAKE = env -u MAKEFLAGS make $(FMAKEFLAGS)
modules-Linux:
mkdir -p $(sort $(dir $(spl-objs) $(spl-)))
mkdir -p $(sort $(dir $(zfs-objs) $(zfs-)))
$(MAKE) -C @LINUX_OBJ@ $(if @KERNEL_CC@,CC=@KERNEL_CC@) \
$(if @KERNEL_LD@,LD=@KERNEL_LD@) $(if @KERNEL_LLVM@,LLVM=@KERNEL_LLVM@) \
$(if @KERNEL_CROSS_COMPILE@,CROSS_COMPILE=@KERNEL_CROSS_COMPILE@) \
$(if @KERNEL_ARCH@,ARCH=@KERNEL_ARCH@) \
M="$$PWD" @KERNEL_MAKE@ CONFIG_ZFS=m modules
modules-FreeBSD:
+$(FMAKE)
modules-unknown:
@true
modules: modules-@ac_system@
clean-Linux:
@# Only cleanup the kernel build directories when CONFIG_KERNEL
@# is defined. This indicates that kernel modules should be built.
@CONFIG_KERNEL_TRUE@ $(MAKE) -C @LINUX_OBJ@ M="$$PWD" @KERNEL_MAKE@ clean
$(RM) @LINUX_SYMBOLS@ Module.markers
find . -name '*.ur-safe' -type f -delete
clean-FreeBSD:
+$(FMAKE) clean
clean: clean-@ac_system@
.PHONY: modules_uninstall-Linux-legacy
modules_uninstall-Linux-legacy:
$(RM) -r $(addprefix $(KMODDIR)/$(INSTALL_MOD_DIR)/,spl/ avl/ icp/ lua/ nvpair/ unicode/ zcommon/ zfs/ zstd/)
KMODDIR := $(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@
modules_install-Linux: modules_uninstall-Linux-legacy
@# Install the kernel modules
$(MAKE) -C @LINUX_OBJ@ M="$$PWD" modules_install \
INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) \
INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) \
$(if @KERNEL_ARCH@,ARCH=@KERNEL_ARCH@) \
KERNELRELEASE=@LINUX_VERSION@
@# Remove extraneous build products when packaging
- if [ -n "$(DESTDIR)" ]; then \
+ if [ -n "$(DESTDIR)" ] && [ "$(DONT_DELETE_MODULES_FILES)" != "1" ]; then \
find $(KMODDIR) -name 'modules.*' -delete; \
fi
@# Debian ships tiny fake System.map files that are
@# syntactically valid but just say
@# "if you want system.map go install this package"
@# Naturally, depmod is less than amused by this.
@# So if we find it missing or with one of these present,
@# we check for the alternate path for the System.map
sysmap=$(INSTALL_MOD_PATH)/boot/System.map-@LINUX_VERSION@; \
{ [ -f "$$sysmap" ] && [ $$(wc -l < "$$sysmap") -ge 100 ]; } || \
sysmap=$(INSTALL_MOD_PATH)/usr/lib/debug/boot/System.map-@LINUX_VERSION@; \
if [ -f $$sysmap ]; then \
- depmod -ae -F $$sysmap @LINUX_VERSION@; \
+ depmod -ae -F $$sysmap @LINUX_VERSION@ -b $(INSTALL_MOD_PATH)/; \
fi
modules_install-FreeBSD:
@# Install the kernel modules
+$(FMAKE) install
modules_install: modules_install-@ac_system@
data_install-Linux:
@mkdir -p $(DESTDIR)/@prefix@/src/zfs-@VERSION@/@LINUX_VERSION@
cp ../zfs.release ../zfs_config.h @LINUX_SYMBOLS@ $(DESTDIR)/@prefix@/src/zfs-@VERSION@/@LINUX_VERSION@
data_install-FreeBSD:
@
data_install: data_install-@ac_system@
modules_uninstall-Linux: modules_uninstall-Linux-legacy
@# Uninstall the kernel modules
$(RM) $(addprefix $(KMODDIR)/$(INSTALL_MOD_DIR)/,zfs.ko spl.ko)
modules_uninstall-FreeBSD:
@false
modules_uninstall: modules_uninstall-@ac_system@
data_uninstall-Linux:
$(RM) $(addprefix $(DESTDIR)/@prefix@/src/zfs-@VERSION@/@LINUX_VERSION@/,zfs.release zfs_config.h @LINUX_SYMBOLS@)
data_uninstall-FreeBSD:
@
data_uninstall: data_uninstall-@ac_system@
cppcheck-Linux:
@CPPCHECK@ -j@CPU_COUNT@ --std=c99 --quiet --force --error-exitcode=2 \
--inline-suppr \
--suppress=unmatchedSuppression \
--suppress=noValidConfiguration \
--enable=warning,information -D_KERNEL \
--include=@LINUX_OBJ@/include/generated/autoconf.h \
--include=@top_builddir@/zfs_config.h \
--config-exclude=@LINUX_OBJ@/include \
-i zstd/lib \
-I @LINUX_OBJ@/include \
-I @top_srcdir@/include/os/linux/kernel \
-I @top_srcdir@/include/os/linux/spl \
-I @top_srcdir@/include/os/linux/zfs \
-I @top_srcdir@/include \
avl icp lua nvpair unicode zcommon zfs zstd os/linux
cppcheck-FreeBSD:
@true
cppcheck: cppcheck-@ac_system@
distdir:
cd @srcdir@ && find . -name '*.[chS]' -exec sh -c 'for f; do mkdir -p $$distdir/$${f%/*}; cp @srcdir@/$$f $$distdir/$$f; done' _ {} +
cp @srcdir@/Makefile.bsd $$distdir/Makefile.bsd
gen-zstd-symbols:
for obj in $(addprefix zstd/,$(ZSTD_UPSTREAM_OBJS)); do echo; echo "/* $${obj#zstd/}: */"; @OBJDUMP@ -t $$obj | awk '$$2 == "g" && !/ zfs_/ {print "#define\t" $$6 " zfs_" $$6}' | sort; done >> zstd/include/zstd_compat_wrapper.h
check-zstd-symbols:
@OBJDUMP@ -t $(addprefix zstd/,$(ZSTD_UPSTREAM_OBJS)) | awk '/file format/ {print} $$2 == "g" && (!/ zfs_/ && !/ __pfx_zfs_/) {++ret; print} END {exit ret}'
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index 7350b8a6d49f..bddb25a07204 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -1,804 +1,825 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/arc_os.h>
#include <sys/dmu.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/nvpair.h>
#include <sys/mount.h>
#include <sys/taskqueue.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zcp.h>
#include <sys/zio_checksum.h>
#include <sys/vdev_removal.h>
#include <sys/dsl_crypt.h>
#include <sys/zfs_ioctl_compat.h>
#include <sys/zfs_context.h>
#include <sys/arc_impl.h>
#include <sys/dsl_pool.h>
#include <sys/vmmeter.h>
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, arc, CTLFLAG_RW, 0,
"ZFS adaptive replacement cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, brt, CTLFLAG_RW, 0,
"ZFS Block Reference Table");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, condense, CTLFLAG_RW, 0, "ZFS condense");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf, CTLFLAG_RW, 0, "ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf_cache, CTLFLAG_RW, 0,
"ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, deadman, CTLFLAG_RW, 0, "ZFS deadman");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS dedup");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, l2arc, CTLFLAG_RW, 0, "ZFS l2arc");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, livelist, CTLFLAG_RW, 0, "ZFS livelist");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, lua, CTLFLAG_RW, 0, "ZFS lua");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, mg, CTLFLAG_RW, 0, "ZFS metaslab group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, multihost, CTLFLAG_RW, 0,
"ZFS multihost protection");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, prefetch, CTLFLAG_RW, 0, "ZFS prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, reconstruct, CTLFLAG_RW, 0, "ZFS reconstruct");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, recv, CTLFLAG_RW, 0, "ZFS receive");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, send, CTLFLAG_RW, 0, "ZFS send");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
SYSCTL_NODE(_vfs_zfs_livelist, OID_AUTO, condense, CTLFLAG_RW, 0,
"ZFS livelist condense");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, file, CTLFLAG_RW, 0, "ZFS VDEV file");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
"ZFS VDEV mirror");
SYSCTL_DECL(_vfs_zfs_version);
SYSCTL_CONST_STRING(_vfs_zfs_version, OID_AUTO, module, CTLFLAG_RD,
(ZFS_META_VERSION "-" ZFS_META_RELEASE), "OpenZFS module version");
/* arc.c */
int
param_set_arc_u64(SYSCTL_HANDLER_ARGS)
{
int err;
err = sysctl_handle_64(oidp, arg1, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
arc_tuning_update(B_TRUE);
return (0);
}
int
param_set_arc_int(SYSCTL_HANDLER_ARGS)
{
int err;
err = sysctl_handle_int(oidp, arg1, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
arc_tuning_update(B_TRUE);
return (0);
}
int
param_set_arc_max(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_arc_max;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min ||
val >= arc_all_memory()))
return (SET_ERROR(EINVAL));
zfs_arc_max = val;
arc_tuning_update(B_TRUE);
/* Update the sysctl to the tuned value */
if (val != 0)
zfs_arc_max = arc_c_max;
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_max, "LU",
"Maximum ARC size in bytes (LEGACY)");
int
param_set_arc_min(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_arc_min;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max))
return (SET_ERROR(EINVAL));
zfs_arc_min = val;
arc_tuning_update(B_TRUE);
/* Update the sysctl to the tuned value */
if (val != 0)
zfs_arc_min = arc_c_min;
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_min, "LU",
"Minimum ARC size in bytes (LEGACY)");
extern uint_t zfs_arc_free_target;
int
param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
{
uint_t val;
int err;
val = zfs_arc_free_target;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val < minfree)
return (EINVAL);
if (val > vm_cnt.v_page_count)
return (EINVAL);
zfs_arc_free_target = val;
return (0);
}
/*
* NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
* pagedaemon initialization.
*/
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_free_target, "IU",
"Desired number of free pages below which ARC triggers reclaim"
" (LEGACY)");
int
param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
int err, val;
val = arc_no_grow_shift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val < 0 || val >= arc_shrink_shift)
return (EINVAL);
arc_no_grow_shift = val;
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_no_grow_shift, "I",
"log2(fraction of ARC which must be free to allow growing) (LEGACY)");
extern uint64_t l2arc_write_max;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
CTLFLAG_RWTUN, &l2arc_write_max, 0,
"Max write bytes per interval (LEGACY)");
extern uint64_t l2arc_write_boost;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
CTLFLAG_RWTUN, &l2arc_write_boost, 0,
"Extra write bytes during device warmup (LEGACY)");
extern uint64_t l2arc_headroom;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
CTLFLAG_RWTUN, &l2arc_headroom, 0,
"Number of max device writes to precache (LEGACY)");
extern uint64_t l2arc_headroom_boost;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
"Compressed l2arc_headroom multiplier (LEGACY)");
extern uint64_t l2arc_feed_secs;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
"Seconds between L2ARC writing (LEGACY)");
extern uint64_t l2arc_feed_min_ms;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
"Min feed interval in milliseconds (LEGACY)");
extern int l2arc_noprefetch;
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
"Skip caching prefetched buffers (LEGACY)");
extern int l2arc_feed_again;
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
CTLFLAG_RWTUN, &l2arc_feed_again, 0,
"Turbo L2ARC warmup (LEGACY)");
extern int l2arc_norw;
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
CTLFLAG_RWTUN, &l2arc_norw, 0,
"No reads during writes (LEGACY)");
static int
param_get_arc_state_size(SYSCTL_HANDLER_ARGS)
{
arc_state_t *state = (arc_state_t *)arg1;
int64_t val;
val = zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
return (sysctl_handle_64(oidp, &val, 0, req));
}
extern arc_state_t ARC_anon;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, anon_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_anon, 0, param_get_arc_state_size, "Q",
"size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
&ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
&ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in anonymous state");
extern arc_state_t ARC_mru;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mru, 0, param_get_arc_state_size, "Q",
"size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
&ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
&ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mru state");
extern arc_state_t ARC_mru_ghost;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_ghost_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mru_ghost, 0, param_get_arc_state_size, "Q",
"size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mru ghost state");
extern arc_state_t ARC_mfu;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mfu, 0, param_get_arc_state_size, "Q",
"size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
&ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mfu state");
extern arc_state_t ARC_mfu_ghost;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_ghost_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mfu_ghost, 0, param_get_arc_state_size, "Q",
"size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mfu ghost state");
extern arc_state_t ARC_uncached;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, uncached_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_uncached, 0, param_get_arc_state_size, "Q",
"size of uncached state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_metadata_esize, CTLFLAG_RD,
&ARC_uncached.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in uncached state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_data_esize, CTLFLAG_RD,
&ARC_uncached.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in uncached state");
extern arc_state_t ARC_l2c_only;
SYSCTL_PROC(_vfs_zfs, OID_AUTO, l2c_only_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_l2c_only, 0, param_get_arc_state_size, "Q",
"size of l2c_only state");
/* dbuf.c */
/* dmu.c */
/* dmu_zfetch.c */
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
extern uint32_t zfetch_max_distance;
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
CTLFLAG_RWTUN, &zfetch_max_distance, 0,
"Max bytes to prefetch per stream (LEGACY)");
extern uint32_t zfetch_max_idistance;
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
"Max bytes to prefetch indirects for per stream (LEGACY)");
/* dsl_pool.c */
/* dnode.c */
/* dsl_scan.c */
/* metaslab.c */
int
param_set_active_allocator(SYSCTL_HANDLER_ARGS)
{
char buf[16];
int rc;
if (req->newptr == NULL)
strlcpy(buf, zfs_active_allocator, sizeof (buf));
rc = sysctl_handle_string(oidp, buf, sizeof (buf), req);
if (rc || req->newptr == NULL)
return (rc);
if (strcmp(buf, zfs_active_allocator) == 0)
return (0);
return (param_set_active_allocator_common(buf));
}
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
extern int zfs_metaslab_sm_blksz_no_log;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0,
"Block size for space map in pools with log space map disabled. "
"Power of 2 greater than 4096.");
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
extern int zfs_metaslab_sm_blksz_with_log;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0,
"Block size for space map in pools with log space map enabled. "
"Power of 2 greater than 4096.");
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
extern uint_t zfs_condense_pct;
SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
CTLFLAG_RWTUN, &zfs_condense_pct, 0,
"Condense on-disk spacemap when it is more than this many percents"
" of in-memory counterpart");
extern uint_t zfs_remove_max_segment;
SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
"Largest contiguous segment ZFS will attempt to allocate when removing"
" a device");
extern int zfs_removal_suspend_progress;
SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
"Ensures certain actions can happen while in the middle of a removal");
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size it switches to using a more
* aggressive strategy (i.e. search by size rather than by offset).
*/
extern uint64_t metaslab_df_alloc_threshold;
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0,
"Minimum size which forces the dynamic allocator to change its"
" allocation strategy");
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
extern uint_t metaslab_df_free_pct;
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
"The minimum free space, in percent, which must be available in a"
" space map to continue allocations in a first-fit fashion");
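/*
 * Illustrative sketch (example only, not part of this change) of the
 * policy the two tunables above describe: keep using first-fit (the
 * lowest-offset segment that fits) while the metaslab still has at
 * least df_free_pct percent free and its largest free segment can
 * satisfy df_alloc_threshold, otherwise fall back to best-fit (the
 * smallest segment that fits).  The segment array, its sort order
 * (ascending offset) and the helper are invented for the example;
 * the real allocator operates on range trees in metaslab.c.
 */
#if 0	/* example only */
typedef struct ex_seg {
	uint64_t es_offset;	/* segments assumed sorted by offset */
	uint64_t es_size;
} ex_seg_t;

static ex_seg_t *
ex_pick_segment(ex_seg_t *segs, int nsegs, uint64_t ms_size,
    uint64_t ms_free, uint64_t asize, uint64_t alloc_threshold,
    uint_t free_pct)
{
	ex_seg_t *best = NULL;
	uint64_t maxfree = 0;
	int firstfit, i;

	for (i = 0; i < nsegs; i++)
		if (segs[i].es_size > maxfree)
			maxfree = segs[i].es_size;

	/* Stay with first-fit only while the metaslab is still "healthy". */
	firstfit = (ms_free * 100 >= ms_size * free_pct) &&
	    (maxfree >= alloc_threshold);

	for (i = 0; i < nsegs; i++) {
		if (segs[i].es_size < asize)
			continue;
		if (firstfit)
			return (&segs[i]);	/* lowest offset wins */
		if (best == NULL || segs[i].es_size < best->es_size)
			best = &segs[i];	/* smallest fit wins */
	}
	return (best);
}
#endif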
/* mmp.c */
int
param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
{
int err;
err = sysctl_handle_64(oidp, &zfs_multihost_interval, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (spa_mode_global != SPA_MODE_UNINIT)
mmp_signal_all_threads();
return (0);
}
/* spa.c */
extern int zfs_ccw_retry_interval;
SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval,
CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0,
"Configuration cache file write, retry after failure, interval"
" (seconds)");
extern uint64_t zfs_max_missing_tvds_cachefile;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile,
CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0,
"Allow importing pools with missing top-level vdevs in cache file");
extern uint64_t zfs_max_missing_tvds_scan;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan,
CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0,
"Allow importing pools with missing top-level vdevs during scan");
/* spa_misc.c */
extern int zfs_flags;
static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
int err, val;
val = zfs_flags;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
/*
* ZFS_DEBUG_MODIFY must be enabled prior to boot so all
* arc buffers in the system have the necessary additional
* checksum data. However, it is safe to disable at any
* time.
*/
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
val &= ~ZFS_DEBUG_MODIFY;
zfs_flags = val;
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags,
CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, NULL, 0,
sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");
int
param_set_deadman_synctime(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_deadman_synctime_ms;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
zfs_deadman_synctime_ms = val;
spa_set_deadman_synctime(MSEC2NSEC(zfs_deadman_synctime_ms));
return (0);
}
int
param_set_deadman_ziotime(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_deadman_ziotime_ms;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
zfs_deadman_ziotime_ms = val;
spa_set_deadman_ziotime(MSEC2NSEC(zfs_deadman_ziotime_ms));
return (0);
}
int
param_set_deadman_failmode(SYSCTL_HANDLER_ARGS)
{
char buf[16];
int rc;
if (req->newptr == NULL)
strlcpy(buf, zfs_deadman_failmode, sizeof (buf));
rc = sysctl_handle_string(oidp, buf, sizeof (buf), req);
if (rc || req->newptr == NULL)
return (rc);
if (strcmp(buf, zfs_deadman_failmode) == 0)
return (0);
if (strcmp(buf, "wait") == 0)
zfs_deadman_failmode = "wait";
if (strcmp(buf, "continue") == 0)
zfs_deadman_failmode = "continue";
if (strcmp(buf, "panic") == 0)
zfs_deadman_failmode = "panic";
return (-param_set_deadman_failmode_common(buf));
}
+int
+param_set_raidz_impl(SYSCTL_HANDLER_ARGS)
+{
+ const size_t bufsize = 128;
+ char *buf;
+ int rc;
+
+ buf = malloc(bufsize, M_SOLARIS, M_WAITOK | M_ZERO);
+ if (req->newptr == NULL)
+ vdev_raidz_impl_get(buf, bufsize);
+
+ rc = sysctl_handle_string(oidp, buf, bufsize, req);
+ if (rc || req->newptr == NULL) {
+ free(buf, M_SOLARIS);
+ return (rc);
+ }
+ rc = vdev_raidz_impl_set(buf);
+ free(buf, M_SOLARIS);
+ return (rc);
+}
+
int
param_set_slop_shift(SYSCTL_HANDLER_ARGS)
{
int val;
int err;
val = spa_slop_shift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val < 1 || val > 31)
return (EINVAL);
spa_slop_shift = val;
return (0);
}
/* spacemap.c */
extern int space_map_ibs;
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
&space_map_ibs, 0, "Space map indirect block shift");
/* vdev.c */
int
param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
{
int val;
int err;
val = zfs_vdev_min_auto_ashift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
return (SET_ERROR(EINVAL));
zfs_vdev_min_auto_ashift = val;
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
&zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
param_set_min_auto_ashift, "IU",
"Min ashift used when creating new top-level vdev. (LEGACY)");
int
param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
int val;
int err;
val = zfs_vdev_max_auto_ashift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
return (SET_ERROR(EINVAL));
zfs_vdev_max_auto_ashift = val;
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
&zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
param_set_max_auto_ashift, "IU",
"Max ashift used when optimizing for logical -> physical sector size on"
" new top-level vdevs. (LEGACY)");
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
extern int zfs_vdev_dtl_sm_blksz;
SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0,
"Block size for DTL space map. Power of 2 greater than 4096.");
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
extern int zfs_vdev_standard_sm_blksz;
SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
"Block size for standard space map. Power of 2 greater than 4096.");
extern int vdev_validate_skip;
SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
CTLFLAG_RDTUN, &vdev_validate_skip, 0,
"Enable to bypass vdev_validate().");
/* vdev_mirror.c */
/* vdev_queue.c */
extern uint_t zfs_vdev_max_active;
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
"The maximum number of I/Os of all types active for each device."
" (LEGACY)");
/* zio.c */
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
"Exclude metadata buffers from dumps as well");
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
index 33c7d0879741..da5513c50189 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
@@ -1,1444 +1,1445 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#define SPL_KMEM_CACHE_IMPLEMENTING
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <sys/string.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
/*
* Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
* with smp_mb__{before,after}_atomic() because they were redundant. This is
* only used inside our SLAB allocator, so we implement an internal wrapper
* here to give us smp_mb__{before,after}_atomic() on older kernels.
*/
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
/*
* Cache magazines are an optimization designed to minimize the cost of
* allocating memory. They do this by keeping a per-cpu cache of recently
* freed objects, which can then be reallocated without taking a lock. This
* can improve performance on highly contended caches. However, because
* objects in magazines will prevent otherwise empty slabs from being
* immediately released this may not be ideal for low memory machines.
*
* For this reason spl_kmem_cache_magazine_size can be used to set a maximum
* magazine size. When this value is set to 0 the magazine size will be
* automatically determined based on the object size. Otherwise magazines
* will be limited to 2-256 objects per magazine (i.e. per CPU). Magazines
* may never be entirely disabled in this implementation.
*/
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
"Default magazine size (2-256), set automatically (0)");
static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
* For small objects the Linux slab allocator should be used to make the most
* efficient use of the memory. However, large objects are not supported by
* the Linux slab and therefore the SPL implementation is preferred. A cutoff
* of 16K was determined to be optimal for architectures using 4K pages and
* to also work well on architectures using larger 64K page sizes.
*/
static unsigned int spl_kmem_cache_slab_limit =
SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
"Objects less than N bytes use the Linux slab");
/*
* The number of threads available to allocate new slabs for caches. This
* should not need to be tuned but it is available for performance analysis.
*/
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
"Number of spl_kmem_cache threads");
/*
* Slab allocation interfaces
*
* While the Linux slab implementation was inspired by the Solaris
* implementation I cannot use it to emulate the Solaris APIs. I
* require two features which are not provided by the Linux slab.
*
* 1) Constructors AND destructors. Recent versions of the Linux
* kernel have removed support for destructors. This is a deal
* breaker for the SPL which contains particularly expensive
* initializers for mutex's, condition variables, etc. We also
* require a minimal level of cleanup for these data types unlike
* many Linux data types which do not need to be explicitly destroyed.
*
* 2) Virtual address space backed slab. Callers of the Solaris slab
* expect it to work well for both small and very large allocations.
* Because of memory fragmentation the Linux slab which is backed
* by kmalloc'ed memory performs very badly when confronted with
* large numbers of large allocations. Basing the slab on the
* virtual address space removes the need for contiguous pages
* and greatly improves performance for large allocations.
*
* For these reasons, the SPL has its own slab implementation with
* the needed features. It is not as highly optimized as either the
* Solaris or Linux slabs, but it should get me most of what is
* needed until it can be optimized or obsoleted by another approach.
*
* One serious concern I do have about this method is the relatively
* small virtual address space on 32bit arches. This will seriously
* constrain the size of the slab caches and their performance.
*/
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */
static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
if (skc->skc_flags & KMC_RECLAIMABLE)
lflags |= __GFP_RECLAIMABLE;
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return (ptr);
}
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
/*
* The Linux direct reclaim path uses this out of band value to
* determine if forward progress is being made. Normally this is
* incremented by kmem_freepages() which is part of the various
* Linux slab implementations. However, since we are using none
* of that infrastructure we are responsible for incrementing it.
*/
if (current->reclaim_state)
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
vfree(ptr);
}
/*
* Required space for each aligned sks.
*/
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
skc->skc_obj_align, uint32_t));
}
/*
* Required space for each aligned object.
*/
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
uint32_t align = skc->skc_obj_align;
return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);
uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);
/*
* Lookup the spl_kmem_obj_t for an object given that object.
*/
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
skc->skc_obj_align, uint32_t));
}
/*
* It's important that we pack the spl_kmem_obj_t structure and the
* actual objects into one large address space to minimize the number
* of calls to the allocator. It is far better to do a few large
* allocations and then subdivide them ourselves. Now which allocator
* we use requires balancing a few trade-offs.
*
* For small objects we use kmem_alloc() because as long as you are
* only requesting a small number of pages (ideally just one) it's cheap.
* However, when you start requesting multiple pages with kmem_alloc()
* it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
* which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
* global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
*
* +------------------------+
* | spl_kmem_slab_t --+-+ |
* | skc_obj_size <-+ | |
* | spl_kmem_obj_t | |
* | skc_obj_size <---+ |
* | spl_kmem_obj_t | |
* | ... v |
* +------------------------+
*/
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
void *base;
uint32_t obj_size;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
return (NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
sks->sks_objs = skc->skc_slab_objs;
sks->sks_age = jiffies;
sks->sks_cache = skc;
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
obj_size = spl_obj_size(skc);
for (int i = 0; i < sks->sks_objs; i++) {
void *obj = base + spl_sks_size(skc) + (i * obj_size);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
INIT_LIST_HEAD(&sko->sko_list);
list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
return (sks);
}
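/*
 * Worked example (illustration only, with made-up sizes) of the layout
 * pictured in the comment above spl_slab_alloc(): it just evaluates the
 * same offset arithmetic that spl_slab_alloc() performs through
 * spl_sks_size() and spl_obj_size().  The 512-byte object size and
 * 32-byte alignment are assumptions for the example.
 */
#if 0	/* example only */
static void
ex_print_slab_layout(void)
{
	const uint32_t align = 32;		/* assumed alignment */
	const uint32_t obj_size = 512;		/* assumed object size */
	const uint32_t sks_hdr = P2ROUNDUP(sizeof (spl_kmem_slab_t), align);
	const uint32_t stride = P2ROUNDUP(obj_size, align) +
	    P2ROUNDUP(sizeof (spl_kmem_obj_t), align);

	/* Object i starts at slab base + sks_hdr + i * stride. */
	for (uint32_t i = 0; i < 4; i++)
		printk("obj %u at offset %u, its spl_kmem_obj_t at %u\n",
		    i, sks_hdr + i * stride,
		    sks_hdr + i * stride + P2ROUNDUP(obj_size, align));
}
#endif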
/*
* Remove a slab from the complete or partial list. It must be called with
* the 'skc->skc_lock' held, but the actual free must be performed
* outside the lock to prevent deadlocking on vmem addresses.
*/
static void
spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
/*
* Update slab/objects counters in the cache, then remove the
* slab from the skc->skc_partial_list. Finally add the slab
* and all its objects into the private work lists where the
* destructors will be called and the memory freed to the system.
*/
skc->skc_obj_total -= sks->sks_objs;
skc->skc_slab_total--;
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
}
/*
* Reclaim empty slabs at the end of the partial list.
*/
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
spl_kmem_slab_t *sks = NULL, *m = NULL;
spl_kmem_obj_t *sko = NULL, *n = NULL;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
/*
* Empty slabs and objects must be moved to a private list so they
* can be safely freed outside the spin lock. All empty slabs are
* at the end of skc->skc_partial_list, therefore once a non-empty
* slab is found we can stop scanning.
*/
spin_lock(&skc->skc_lock);
list_for_each_entry_safe_reverse(sks, m,
&skc->skc_partial_list, sks_list) {
if (sks->sks_ref > 0)
break;
spl_slab_free(sks, &sks_list, &sko_list);
}
spin_unlock(&skc->skc_lock);
/*
* The following two loops ensure all the object destructors are run,
* and the slabs themselves are freed. This is all done outside the
* skc->skc_lock since this allows the destructor to sleep, and
* allows us to perform a conditional reschedule when freeing a
* large number of objects and slabs back to the system.
*/
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
}
list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
struct rb_node *node = root->rb_node;
spl_kmem_emergency_t *ske;
unsigned long address = (unsigned long)obj;
while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
if (address < ske->ske_obj)
node = node->rb_left;
else if (address > ske->ske_obj)
node = node->rb_right;
else
return (ske);
}
return (NULL);
}
static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
unsigned long address = ske->ske_obj;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
}
rb_link_node(&ske->ske_node, parent, new);
rb_insert_color(&ske->ske_node, root);
return (1);
}
/*
* Allocate a single emergency object and track it in a red black tree.
*/
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
return (-EEXIST);
if (skc->skc_flags & KMC_RECLAIMABLE)
lflags |= __GFP_RECLAIMABLE;
ske = kmalloc(sizeof (*ske), lflags);
if (ske == NULL)
return (-ENOMEM);
ske->ske_obj = __get_free_pages(lflags, order);
if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
if (likely(empty)) {
skc->skc_obj_total++;
skc->skc_obj_emergency++;
if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
skc->skc_obj_emergency_max = skc->skc_obj_emergency;
}
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
*obj = (void *)ske->ske_obj;
return (0);
}
/*
* Locate the passed object in the red black tree and free it.
*/
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
if (ske) {
rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
skc->skc_obj_emergency--;
skc->skc_obj_total--;
}
spin_unlock(&skc->skc_lock);
if (ske == NULL)
return (-ENOENT);
free_pages(ske->ske_obj, order);
kfree(ske);
return (0);
}
/*
* Release objects from the per-cpu magazine back to their slab. The flush
* argument contains the max number of entries to remove from the magazine.
*/
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
spin_lock(&skc->skc_lock);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
int count = MIN(flush, skm->skm_avail);
for (int i = 0; i < count; i++)
spl_cache_shrink(skc, skm->skm_objs[i]);
skm->skm_avail -= count;
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof (void *) * skm->skm_avail);
spin_unlock(&skc->skc_lock);
}
/*
* Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page.
*/
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
sks_size = spl_sks_size(skc);
obj_size = spl_obj_size(skc);
max_size = (spl_kmem_cache_max_size * 1024 * 1024);
tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
if (tgt_size <= max_size) {
tgt_objs = (tgt_size - sks_size) / obj_size;
} else {
tgt_objs = (max_size - sks_size) / obj_size;
tgt_size = (tgt_objs * obj_size) + sks_size;
}
if (tgt_objs == 0)
return (-ENOSPC);
*objs = tgt_objs;
*size = tgt_size;
return (0);
}
/*
* Make a guess at a reasonable per-cpu magazine size based on the size of
* each object and the cost of caching N of them in each magazine. Long
* term this should really adapt based on an observed usage heuristic.
*/
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
uint32_t obj_size = spl_obj_size(skc);
int size;
if (spl_kmem_cache_magazine_size > 0)
return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));
/* Per-magazine sizes below assume a 4 KiB page size */
if (obj_size > (PAGE_SIZE * 256))
size = 4; /* Minimum 4 MiB per-magazine */
else if (obj_size > (PAGE_SIZE * 32))
size = 16; /* Minimum 2 MiB per-magazine */
else if (obj_size > (PAGE_SIZE))
size = 64; /* Minimum 256 KiB per-magazine */
else if (obj_size > (PAGE_SIZE / 4))
size = 128; /* Minimum 128 KiB per-magazine */
else
size = 256;
return (size);
}
/*
* Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
spl_kmem_magazine_t *skm;
int size = sizeof (spl_kmem_magazine_t) +
sizeof (void *) * skc->skc_mag_size;
skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
skm->skm_cpu = cpu;
}
return (skm);
}
/*
* Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kfree(skm);
}
/*
* Create all per-cpu magazines of reasonable sizes.
*/
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for_each_possible_cpu(i) {
skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
kfree(skc->skc_mag);
return (-ENOMEM);
}
}
return (0);
}
/*
* Destroy all per-cpu magazines.
*/
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
spl_kmem_magazine_t *skm;
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
kfree(skc->skc_mag);
}
/*
* Create an object cache based on the following arguments:
* name cache name
* size cache object size
* align cache object alignment
* ctor cache object constructor
* dtor cache object destructor
* reclaim cache object reclaim
* priv cache private data for ctor/dtor/reclaim
* vmp unused must be NULL
* flags
* KMC_KVMEM Force kvmem backed SPL cache
* KMC_SLAB Force Linux slab backed cache
* KMC_NODEBUG Disable debugging (unsupported)
* KMC_RECLAIMABLE Memory can be freed under pressure
*/
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
void *priv, void *vmp, int flags)
{
gfp_t lflags = kmem_flags_convert(KM_SLEEP);
spl_kmem_cache_t *skc;
int rc;
/*
* Unsupported flags
*/
ASSERT(vmp == NULL);
ASSERT(reclaim == NULL);
might_sleep();
skc = kzalloc(sizeof (*skc), lflags);
if (skc == NULL)
return (NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = kmalloc(skc->skc_name_size, lflags);
if (skc->skc_name == NULL) {
kfree(skc);
return (NULL);
}
strlcpy(skc->skc_name, name, skc->skc_name_size);
skc->skc_ctor = ctor;
skc->skc_dtor = dtor;
skc->skc_private = priv;
skc->skc_vmp = vmp;
skc->skc_linux_cache = NULL;
skc->skc_flags = flags;
skc->skc_obj_size = size;
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
atomic_set(&skc->skc_ref, 0);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
skc->skc_slab_create = 0;
skc->skc_slab_destroy = 0;
skc->skc_slab_total = 0;
skc->skc_slab_alloc = 0;
skc->skc_slab_max = 0;
skc->skc_obj_total = 0;
skc->skc_obj_alloc = 0;
skc->skc_obj_max = 0;
skc->skc_obj_deadlock = 0;
skc->skc_obj_emergency = 0;
skc->skc_obj_emergency_max = 0;
rc = percpu_counter_init(&skc->skc_linux_alloc, 0, GFP_KERNEL);
if (rc != 0) {
+ kfree(skc->skc_name);
kfree(skc);
return (NULL);
}
/*
* Verify the requested alignment restriction is sane.
*/
if (align) {
VERIFY(ISP2(align));
VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
VERIFY3U(align, <=, PAGE_SIZE);
skc->skc_obj_align = align;
}
/*
* When no specific type of slab is requested (kmem, vmem, or
* linuxslab) then select a cache type based on the object size
* and default tunables.
*/
if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
if (spl_kmem_cache_slab_limit &&
size <= (size_t)spl_kmem_cache_slab_limit) {
/*
* Objects smaller than spl_kmem_cache_slab_limit can
* use the Linux slab for better space-efficiency.
*/
skc->skc_flags |= KMC_SLAB;
} else {
/*
* All other objects are considered large and are
* placed on kvmem backed slabs.
*/
skc->skc_flags |= KMC_KVMEM;
}
}
/*
* Given the type of slab allocate the required resources.
*/
if (skc->skc_flags & KMC_KVMEM) {
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
goto out;
rc = spl_magazine_create(skc);
if (rc)
goto out;
} else {
unsigned long slabflags = 0;
if (size > spl_kmem_cache_slab_limit)
goto out;
if (skc->skc_flags & KMC_RECLAIMABLE)
slabflags |= SLAB_RECLAIM_ACCOUNT;
skc->skc_linux_cache = kmem_cache_create_usercopy(
skc->skc_name, size, align, slabflags, 0, size, NULL);
if (skc->skc_linux_cache == NULL)
goto out;
}
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
return (skc);
out:
kfree(skc->skc_name);
percpu_counter_destroy(&skc->skc_linux_alloc);
kfree(skc);
return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/*
* Register a move callback for cache defragmentation.
* XXX: Unimplemented but harmless to stub out for now.
*/
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
* Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
taskqid_t id;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
down_write(&spl_kmem_cache_sem);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
/* Cancel and wait for any pending delayed tasks */
VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
spin_lock(&skc->skc_lock);
id = skc->skc_taskqid;
spin_unlock(&skc->skc_lock);
taskq_cancel_id(spl_kmem_cache_taskq, id);
/*
* Wait until all current callers complete; this is mainly
* to catch the case where a low memory situation triggers a
* cache reaping action which races with this destroy.
*/
wait_event(wq, atomic_read(&skc->skc_ref) == 0);
if (skc->skc_flags & KMC_KVMEM) {
spl_magazine_destroy(skc);
spl_slab_reclaim(skc);
} else {
ASSERT(skc->skc_flags & KMC_SLAB);
kmem_cache_destroy(skc->skc_linux_cache);
}
spin_lock(&skc->skc_lock);
/*
* Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/
ASSERT3U(skc->skc_slab_alloc, ==, 0);
ASSERT3U(skc->skc_obj_alloc, ==, 0);
ASSERT3U(skc->skc_slab_total, ==, 0);
ASSERT3U(skc->skc_obj_total, ==, 0);
ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
percpu_counter_destroy(&skc->skc_linux_alloc);
spin_unlock(&skc->skc_lock);
kfree(skc->skc_name);
kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
* Allocate an object from a slab attached to the cache. This is used to
* repopulate the per-cpu magazine caches in batches when they run low.
*/
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
spl_kmem_obj_t *sko;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(sks->sks_magic == SKS_MAGIC);
sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
ASSERT(sko->sko_magic == SKO_MAGIC);
ASSERT(sko->sko_addr != NULL);
/* Remove from sks_free_list */
list_del_init(&sko->sko_list);
sks->sks_age = jiffies;
sks->sks_ref++;
skc->skc_obj_alloc++;
/* Track max obj usage statistics */
if (skc->skc_obj_alloc > skc->skc_obj_max)
skc->skc_obj_max = skc->skc_obj_alloc;
/* Track max slab usage statistics */
if (sks->sks_ref == 1) {
skc->skc_slab_alloc++;
if (skc->skc_slab_alloc > skc->skc_slab_max)
skc->skc_slab_max = skc->skc_slab_alloc;
}
return (sko->sko_addr);
}
/*
* Generic slab allocation function to be run by the global work queues.
* It is responsible for allocating a new slab, linking it in to the list
* of partial slabs, and then waking any waiters.
*/
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
fstrans_cookie_t cookie = spl_fstrans_mark();
sks = spl_slab_alloc(skc, flags);
spl_fstrans_unmark(cookie);
spin_lock(&skc->skc_lock);
if (sks) {
skc->skc_slab_total++;
skc->skc_obj_total += sks->sks_objs;
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
smp_mb__before_atomic();
clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
smp_mb__after_atomic();
}
spin_unlock(&skc->skc_lock);
return (sks == NULL ? -ENOMEM : 0);
}
static void
spl_cache_grow_work(void *data)
{
spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
spl_kmem_cache_t *skc = ska->ska_cache;
int error = __spl_cache_grow(skc, ska->ska_flags);
atomic_dec(&skc->skc_ref);
smp_mb__before_atomic();
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
if (error == 0)
wake_up_all(&skc->skc_waitq);
kfree(ska);
}
/*
* Returns non-zero when a new slab should be available.
*/
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}
/*
* No available objects on any slabs, create a new slab. Note that this
* functionality is disabled for KMC_SLAB caches which are backed by the
* Linux slab.
*/
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
int remaining, rc = 0;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
*obj = NULL;
/*
* Since we can't sleep, attempt an emergency allocation to satisfy
* the request. The only alternative is to fail the allocation, but
* it's preferable to try. The use of KM_NOSLEEP is expected to be rare.
*/
if (flags & KM_NOSLEEP)
return (spl_emergency_alloc(skc, flags, obj));
might_sleep();
/*
* Before allocating a new slab wait for any reaping to complete and
* then return so the local magazine can be rechecked for new objects.
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
TASK_UNINTERRUPTIBLE);
return (rc ? rc : -EAGAIN);
}
/*
* Note: It would be nice to reduce the context-switch overhead
* and improve NUMA locality by trying to allocate a new slab in the
* current process context with the KM_NOSLEEP flag.
*
* However, this can't be applied to vmem/kvmem due to a bug where
* spl_vmalloc() doesn't honor gfp flags in page table allocation.
*/
/*
* This is handled by dispatching a work request to the global work
* queue. This allows us to asynchronously allocate a new slab while
* retaining the ability to safely fall back to smaller synchronous
* allocations to ensure forward progress is always maintained.
*/
if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
spl_kmem_alloc_t *ska;
ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
if (ska == NULL) {
clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_all(&skc->skc_waitq);
return (-ENOMEM);
}
atomic_inc(&skc->skc_ref);
ska->ska_cache = skc;
ska->ska_flags = flags;
taskq_init_ent(&ska->ska_tqe);
taskq_dispatch_ent(spl_kmem_cache_taskq,
spl_cache_grow_work, ska, 0, &ska->ska_tqe);
}
/*
* The goal here is to only detect the rare case where a virtual slab
* allocation has deadlocked. We must be careful to minimize the use
* of emergency objects which are more expensive to track. Therefore,
* we set a very long timeout for the asynchronous allocation and if
* the timeout is reached the cache is flagged as deadlocked. From
* this point only new emergency objects will be allocated until the
* asynchronous allocation completes and clears the deadlocked flag.
*/
if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
rc = spl_emergency_alloc(skc, flags, obj);
} else {
remaining = wait_event_timeout(skc->skc_waitq,
spl_cache_grow_wait(skc), HZ / 10);
if (!remaining) {
spin_lock(&skc->skc_lock);
if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
skc->skc_obj_deadlock++;
}
spin_unlock(&skc->skc_lock);
}
rc = -ENOMEM;
}
return (rc);
}
/*
* Refill a per-cpu magazine with objects from the slabs for this cache.
* Ideally the magazine can be repopulated using existing objects which have
* been released; however, if we are unable to locate enough free objects new
* slabs of objects will be created. On success NULL is returned, otherwise
* the address of a single emergency object is returned for use by the caller.
*/
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
int count = 0, rc, refill;
void *obj = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
while (refill > 0) {
/* No slabs available we may need to grow the cache */
if (list_empty(&skc->skc_partial_list)) {
spin_unlock(&skc->skc_lock);
local_irq_enable();
rc = spl_cache_grow(skc, flags, &obj);
local_irq_disable();
/* Emergency object for immediate use by caller */
if (rc == 0 && obj != NULL)
return (obj);
if (rc)
goto out;
/* Rescheduled to a different CPU; skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
goto out;
/*
* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill.
*/
refill = MIN(refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
continue;
}
/* Grab the next available slab */
sks = list_entry((&skc->skc_partial_list)->next,
spl_kmem_slab_t, sks_list);
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref < sks->sks_objs);
ASSERT(!list_empty(&sks->sks_free_list));
/*
* Consume as many objects as needed to refill the requested
* cache. We must also be careful not to overfill it.
*/
while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
++count) {
ASSERT(skm->skm_avail < skm->skm_size);
ASSERT(count < skm->skm_size);
skm->skm_objs[skm->skm_avail++] =
spl_cache_obj(skc, sks);
}
/* Move slab to skc_complete_list when full */
if (sks->sks_ref == sks->sks_objs) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_complete_list);
}
}
spin_unlock(&skc->skc_lock);
out:
return (NULL);
}
/*
* Release an object back to the slab from which it came.
*/
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
sks = sko->sko_slab;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_cache == skc);
list_add(&sko->sko_list, &sks->sks_free_list);
sks->sks_age = jiffies;
sks->sks_ref--;
skc->skc_obj_alloc--;
/*
* Move slab to skc_partial_list when no longer full. Slabs
* are added to the head to keep the partial list in quasi-full
* sorted order. Fuller at the head, emptier at the tail.
*/
if (sks->sks_ref == (sks->sks_objs - 1)) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_partial_list);
}
/*
* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation.
*/
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
skc->skc_slab_alloc--;
}
}
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
*/
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_magazine_t *skm;
void *obj = NULL;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Allocate directly from a Linux slab. All optimizations are left
* to the underlying cache; we only need to guarantee that KM_SLEEP
* callers will never fail.
*/
if (skc->skc_flags & KMC_SLAB) {
struct kmem_cache *slc = skc->skc_linux_cache;
do {
obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
} while ((obj == NULL) && !(flags & KM_NOSLEEP));
if (obj != NULL) {
/*
* Even though we leave everything up to the
* underlying cache we still keep track of
* how many objects we've allocated in it for
* better debuggability.
*/
percpu_counter_inc(&skc->skc_linux_alloc);
}
goto ret;
}
local_irq_disable();
restart:
/*
* Safe to update per-cpu structure without lock, but
* in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
if (likely(skm->skm_avail)) {
/* Object available in CPU cache, use it */
obj = skm->skm_objs[--skm->skm_avail];
} else {
obj = spl_cache_refill(skc, skm, flags);
if ((obj == NULL) && !(flags & KM_NOSLEEP))
goto restart;
local_irq_enable();
goto ret;
}
local_irq_enable();
ASSERT(obj);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
ret:
/* Pre-emptively migrate object to CPU L1 cache */
if (obj) {
if (obj && skc->skc_ctor)
skc->skc_ctor(obj, skc->skc_private, flags);
else
prefetchw(obj);
}
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
* Free an object back to the local per-cpu magazine; there is no
* guarantee that this is the same magazine the object was originally
* allocated from. We may need to flush entire magazines
* back to the slabs to make space.
*/
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_magazine_t *skm;
unsigned long flags;
int do_reclaim = 0;
int do_emergency = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Run the destructor
*/
if (skc->skc_dtor)
skc->skc_dtor(obj, skc->skc_private);
/*
* Free the object back to the underlying Linux slab.
*/
if (skc->skc_flags & KMC_SLAB) {
kmem_cache_free(skc->skc_linux_cache, obj);
percpu_counter_dec(&skc->skc_linux_alloc);
return;
}
/*
* While a cache has outstanding emergency objects all freed objects
* must be checked. However, since emergency objects will never use
* a virtual address these objects can be safely excluded as an
* optimization.
*/
if (!is_vmalloc_addr(obj)) {
spin_lock(&skc->skc_lock);
do_emergency = (skc->skc_obj_emergency > 0);
spin_unlock(&skc->skc_lock);
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
return;
}
local_irq_save(flags);
/*
* Safe to update the per-cpu structure without a lock, but
* since no remote memory allocation tracking is performed
* it is entirely possible to allocate an object from one
* CPU cache and return it to another.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
/*
* Per-CPU cache is full; flush it to make space for this object.
* This may result in an empty slab which can be reclaimed once
* interrupts are re-enabled.
*/
if (unlikely(skm->skm_avail >= skm->skm_size)) {
spl_cache_flush(skc, skm, skm->skm_refill);
do_reclaim = 1;
}
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
local_irq_restore(flags);
if (do_reclaim)
spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
* Depending on how many and which objects are released it may simply
* repopulate the local magazine which will then need to age-out. Objects
* which cannot fit in the magazine will be released back to their slabs
* which will also need to age out before being released. This is all just
* best effort and we do not want to thrash creating and destroying slabs.
*/
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
if (skc->skc_flags & KMC_SLAB)
return;
atomic_inc(&skc->skc_ref);
/*
* Prevent concurrent cache reaping when contended.
*/
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
goto out;
/* Reclaim from the magazine and free all now empty slabs. */
unsigned long irq_flags;
local_irq_save(irq_flags);
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
spl_cache_flush(skc, skm, skm->skm_avail);
local_irq_restore(irq_flags);
spl_slab_reclaim(skc);
clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
* This is stubbed out for code consistency with other platforms. There
* is existing logic to prevent concurrent reaping so while this is ugly
* it should do no harm.
*/
int
spl_kmem_cache_reap_active(void)
{
return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);
/*
* Reap all free slabs from all registered caches.
*/
void
spl_kmem_reap(void)
{
spl_kmem_cache_t *skc = NULL;
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
spl_kmem_cache_reap_now(skc);
}
up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_cache_init(void)
{
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
spl_kmem_cache_kmem_threads, maxclsyspri,
spl_kmem_cache_kmem_threads * 8, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (spl_kmem_cache_taskq == NULL)
return (-ENOMEM);
return (0);
}
void
spl_kmem_cache_fini(void)
{
taskq_destroy(spl_kmem_cache_taskq);
}
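/*
 * Usage sketch (example only, not part of this change): creating a
 * cache for a hypothetical object type with the API implemented above,
 * allocating and freeing one object, then tearing the cache down.  The
 * ex_obj_t type and its constructor/destructor are invented for
 * illustration; the constructor/destructor signatures follow how
 * skc_ctor and skc_dtor are invoked in spl_kmem_cache_alloc()/free().
 */
#if 0	/* example only */
typedef struct ex_obj {
	kmutex_t eo_lock;
	int eo_value;
} ex_obj_t;

static int
ex_obj_ctor(void *buf, void *priv, int kmflags)
{
	ex_obj_t *eo = buf;

	mutex_init(&eo->eo_lock, NULL, MUTEX_DEFAULT, NULL);
	eo->eo_value = 0;
	return (0);
}

static void
ex_obj_dtor(void *buf, void *priv)
{
	ex_obj_t *eo = buf;

	mutex_destroy(&eo->eo_lock);
}

static void
ex_cache_demo(void)
{
	spl_kmem_cache_t *cache;
	ex_obj_t *eo;

	/* Flags 0 lets the create path pick KMC_SLAB or KMC_KVMEM. */
	cache = spl_kmem_cache_create("ex_obj_cache", sizeof (ex_obj_t),
	    0, ex_obj_ctor, ex_obj_dtor, NULL, NULL, NULL, 0);
	eo = spl_kmem_cache_alloc(cache, KM_SLEEP);
	eo->eo_value = 42;
	spl_kmem_cache_free(cache, eo);
	spl_kmem_cache_destroy(cache);
}
#endif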
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_raidz.c
new file mode 100644
index 000000000000..0b34ca52fb90
--- /dev/null
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_raidz.c
@@ -0,0 +1,42 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/* Copyright (C) 2025 ConnectWise */
+
+#include <sys/zfs_context.h>
+#include <sys/spa.h>
+#include <sys/zio.h>
+#include <sys/vdev_impl.h>
+#include <sys/vdev_raidz.h>
+
+int
+param_get_raidz_impl(char *buf, zfs_kernel_param_t *kp)
+{
+ return (vdev_raidz_impl_get(buf, PAGE_SIZE));
+}
+
+int
+param_set_raidz_impl(const char *val, zfs_kernel_param_t *kp)
+{
+ int error;
+
+ error = vdev_raidz_impl_set(val);
+ return (error);
+}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_uio.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_uio.c
index db85b626f12a..1a815c62b19a 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_uio.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_uio.c
@@ -1,657 +1,673 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* University Copyright- Copyright (c) 1982, 1986, 1988
* The Regents of the University of California
* All Rights Reserved
*
* University Acknowledgment- Portions of this document are derived from
* software developed by the University of California, Berkeley, and its
* contributors.
*/
/*
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#ifdef _KERNEL
#include <sys/errno.h>
#include <sys/vmem.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/uio_impl.h>
#include <sys/sysmacros.h>
#include <sys/string.h>
#include <sys/zfs_refcount.h>
#include <sys/zfs_debug.h>
#include <linux/kmap_compat.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
/*
* Move "n" bytes at byte address "p"; "rw" indicates the direction
* of the move, and the I/O parameters are provided in "uio", which is
* updated to reflect the data which was moved. Returns 0 on success or
* a non-zero errno on failure.
*/
static int
zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
const struct iovec *iov = uio->uio_iov;
size_t skip = uio->uio_skip;
ulong_t cnt;
ASSERT3S(uio->uio_segflg, ==, UIO_SYSSPACE);
while (n && uio->uio_resid) {
cnt = MIN(iov->iov_len - skip, n);
if (rw == UIO_READ)
memcpy(iov->iov_base + skip, p, cnt);
else
memcpy(p, iov->iov_base + skip, cnt);
skip += cnt;
if (skip == iov->iov_len) {
skip = 0;
uio->uio_iov = (++iov);
uio->uio_iovcnt--;
}
uio->uio_skip = skip;
uio->uio_resid -= cnt;
uio->uio_loffset += cnt;
p = (caddr_t)p + cnt;
n -= cnt;
}
return (0);
}
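/*
 * Userland analogue (example only, not part of this change) of the
 * bookkeeping contract that zfs_uiomove_iov() above implements: copy
 * `n` bytes from `p` across an iovec array while maintaining the
 * per-iovec skip offset and the running residual count.  The helper
 * name and structure are invented for illustration.
 */
#if 0	/* example only */
#include <sys/uio.h>
#include <string.h>

static size_t
ex_copy_to_iovecs(const char *p, size_t n, struct iovec *iov, int iovcnt,
    size_t *skip)
{
	size_t resid = n;

	while (resid > 0 && iovcnt > 0) {
		size_t cnt = iov->iov_len - *skip;

		if (cnt > resid)
			cnt = resid;
		memcpy((char *)iov->iov_base + *skip, p, cnt);
		p += cnt;
		resid -= cnt;
		*skip += cnt;
		if (*skip == iov->iov_len) {	/* iovec exhausted, advance */
			*skip = 0;
			iov++;
			iovcnt--;
		}
	}
	return (n - resid);	/* bytes actually copied */
}
#endif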
static int
zfs_uiomove_bvec_impl(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
const struct bio_vec *bv = uio->uio_bvec;
size_t skip = uio->uio_skip;
ulong_t cnt;
while (n && uio->uio_resid) {
void *paddr;
cnt = MIN(bv->bv_len - skip, n);
paddr = zfs_kmap_local(bv->bv_page);
if (rw == UIO_READ) {
/* Copy from buffer 'p' to the bvec data */
memcpy(paddr + bv->bv_offset + skip, p, cnt);
} else {
/* Copy from bvec data to buffer 'p' */
memcpy(p, paddr + bv->bv_offset + skip, cnt);
}
zfs_kunmap_local(paddr);
skip += cnt;
if (skip == bv->bv_len) {
skip = 0;
uio->uio_bvec = (++bv);
uio->uio_iovcnt--;
}
uio->uio_skip = skip;
uio->uio_resid -= cnt;
uio->uio_loffset += cnt;
p = (caddr_t)p + cnt;
n -= cnt;
}
return (0);
}
static void
zfs_copy_bvec(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
struct bio_vec *bv)
{
void *paddr;
paddr = zfs_kmap_local(bv->bv_page);
if (rw == UIO_READ) {
/* Copy from buffer 'p' to the bvec data */
memcpy(paddr + bv->bv_offset + skip, p, cnt);
} else {
/* Copy from bvec data to buffer 'p' */
memcpy(p, paddr + bv->bv_offset + skip, cnt);
}
zfs_kunmap_local(paddr);
}
/*
* Copy 'n' bytes of data between the buffer p[] and the data represented
* by the request in the uio.
*/
static int
zfs_uiomove_bvec_rq(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
struct request *rq = uio->rq;
struct bio_vec bv;
struct req_iterator iter;
size_t this_seg_start; /* logical offset */
size_t this_seg_end; /* logical offset */
size_t skip_in_seg;
size_t copy_from_seg;
size_t orig_loffset;
int copied = 0;
/*
* Get the original logical offset of this entire request (because
* uio->uio_loffset will be modified over time).
*/
orig_loffset = io_offset(NULL, rq);
this_seg_start = orig_loffset;
rq_for_each_segment(bv, rq, iter) {
/*
* Look up the logical offset of the last byte of this
* segment.
*/
this_seg_end = this_seg_start + bv.bv_len - 1;
/*
* We only need to operate on segments that have data we're
* copying.
*/
if (uio->uio_loffset >= this_seg_start &&
uio->uio_loffset <= this_seg_end) {
/*
* Some, or all, of the data in this segment needs to be
* copied.
*/
/*
* We may not be copying from the first byte in the
* segment. Figure out how many bytes to skip copying
* from the beginning of this segment.
*/
skip_in_seg = uio->uio_loffset - this_seg_start;
/*
* Calculate the total number of bytes from this
* segment that we will be copying.
*/
copy_from_seg = MIN(bv.bv_len - skip_in_seg, n);
/* Copy the bytes */
zfs_copy_bvec(p, skip_in_seg, copy_from_seg, rw, &bv);
p = ((char *)p) + copy_from_seg;
n -= copy_from_seg;
uio->uio_resid -= copy_from_seg;
uio->uio_loffset += copy_from_seg;
copied = 1; /* We copied some data */
}
this_seg_start = this_seg_end + 1;
}
if (!copied) {
/* Didn't copy anything */
uio->uio_resid = 0;
}
return (0);
}
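/*
 * Illustrative sketch, not part of the original source: the offset
 * arithmetic used by zfs_uiomove_bvec_rq() above, modelled on a plain
 * array of segment lengths. For a request whose first byte sits at logical
 * offset "orig", a copy of "n" bytes starting at "loffset" takes
 * MIN(seg_len - skip_in_seg, n) bytes from each overlapping segment.
 */
#include <stdio.h>
#include <stddef.h>

static void
rq_copy_model(const size_t *seg_len, int nsegs, size_t orig, size_t loffset,
    size_t n)
{
	size_t seg_start = orig;

	for (int i = 0; i < nsegs && n > 0; i++) {
		size_t seg_end = seg_start + seg_len[i] - 1;

		if (loffset >= seg_start && loffset <= seg_end) {
			size_t skip_in_seg = loffset - seg_start;
			size_t copy = seg_len[i] - skip_in_seg;

			if (copy > n)
				copy = n;
			printf("segment %d: skip %zu, copy %zu bytes\n",
			    i, skip_in_seg, copy);
			loffset += copy;
			n -= copy;
		}
		seg_start = seg_end + 1;
	}
}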
static int
zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
if (uio->rq != NULL)
return (zfs_uiomove_bvec_rq(p, n, rw, uio));
return (zfs_uiomove_bvec_impl(p, n, rw, uio));
}
static int
zfs_uiomove_iter(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio,
boolean_t revert)
{
size_t cnt = MIN(n, uio->uio_resid);
if (uio->uio_skip)
iov_iter_advance(uio->uio_iter, uio->uio_skip);
if (rw == UIO_READ)
cnt = copy_to_iter(p, cnt, uio->uio_iter);
else
cnt = copy_from_iter(p, cnt, uio->uio_iter);
/*
* When operating on a full pipe no bytes are processed; in that
* case return EFAULT, which is converted to EAGAIN
* by the kernel's generic_file_splice_read() function.
*/
if (cnt == 0)
return (EFAULT);
/*
* Revert advancing the uio_iter. This is set by zfs_uiocopy()
* to avoid consuming the uio and its iov_iter structure.
*/
if (revert)
iov_iter_revert(uio->uio_iter, cnt);
uio->uio_resid -= cnt;
uio->uio_loffset += cnt;
return (0);
}
int
zfs_uiomove(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
if (uio->uio_segflg == UIO_BVEC)
return (zfs_uiomove_bvec(p, n, rw, uio));
else if (uio->uio_segflg == UIO_ITER)
return (zfs_uiomove_iter(p, n, rw, uio, B_FALSE));
else
return (zfs_uiomove_iov(p, n, rw, uio));
}
EXPORT_SYMBOL(zfs_uiomove);
/*
* Fault in the pages of the first n bytes specified by the uio structure.
* 1 byte in each page is touched and the uio struct is unmodified. Any
* error terminates the attempt, as this is only a best effort to get
* the pages resident.
*/
int
zfs_uio_prefaultpages(ssize_t n, zfs_uio_t *uio)
{
if (uio->uio_segflg == UIO_SYSSPACE || uio->uio_segflg == UIO_BVEC ||
(uio->uio_extflg & UIO_DIRECT)) {
/*
* There's never a need to fault in kernel pages or Direct I/O
* write pages. Direct I/O write pages have already been pinned, so
* a fault can never occur for these pages.
*/
return (0);
} else {
ASSERT3S(uio->uio_segflg, ==, UIO_ITER);
/*
* At least as of the Linux 4.18 kernel, iov_iter_fault_in_readable()
* can be relied on to fault in user pages when referenced.
*/
if (iov_iter_fault_in_readable(uio->uio_iter, n))
return (EFAULT);
}
return (0);
}
EXPORT_SYMBOL(zfs_uio_prefaultpages);
/*
* The same as zfs_uiomove() but doesn't modify uio structure.
* return in cbytes how many bytes were copied.
*/
int
zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
{
zfs_uio_t uio_copy;
int ret;
memcpy(&uio_copy, uio, sizeof (zfs_uio_t));
if (uio->uio_segflg == UIO_BVEC)
ret = zfs_uiomove_bvec(p, n, rw, &uio_copy);
else if (uio->uio_segflg == UIO_ITER)
ret = zfs_uiomove_iter(p, n, rw, &uio_copy, B_TRUE);
else
ret = zfs_uiomove_iov(p, n, rw, &uio_copy);
*cbytes = uio->uio_resid - uio_copy.uio_resid;
return (ret);
}
EXPORT_SYMBOL(zfs_uiocopy);
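/*
 * Illustrative sketch, not part of the original source: the "peek" pattern
 * zfs_uiocopy() uses above, shown with a simplified state struct. The state
 * is duplicated, the move is performed against the duplicate, and the byte
 * count is recovered from the difference in residuals, leaving the original
 * state untouched.
 */
#include <string.h>
#include <stddef.h>

struct move_state {
	size_t resid;	/* bytes remaining, like uio_resid */
	size_t offset;	/* logical offset, like uio_loffset */
};

static size_t
peek_move(const struct move_state *st, size_t n)
{
	struct move_state copy;

	memcpy(&copy, st, sizeof (copy));	/* work on a throwaway copy */
	if (n > copy.resid)
		n = copy.resid;
	copy.resid -= n;			/* stand-in for the real move */
	copy.offset += n;
	return (st->resid - copy.resid);	/* bytes that would be copied */
}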
/*
* Drop the next n chars out of *uio.
*/
void
zfs_uioskip(zfs_uio_t *uio, size_t n)
{
if (n > uio->uio_resid)
return;
/*
* When using a uio with a struct request, we simply
* use uio_loffset as a pointer to the next logical byte to
* copy in the request. We don't have to do any fancy
* accounting with uio_bvec/uio_iovcnt since we don't use
* them.
*/
if (uio->uio_segflg == UIO_BVEC && uio->rq == NULL) {
uio->uio_skip += n;
while (uio->uio_iovcnt &&
uio->uio_skip >= uio->uio_bvec->bv_len) {
uio->uio_skip -= uio->uio_bvec->bv_len;
uio->uio_bvec++;
uio->uio_iovcnt--;
}
} else if (uio->uio_segflg == UIO_ITER) {
iov_iter_advance(uio->uio_iter, n);
} else {
ASSERT3S(uio->uio_segflg, ==, UIO_SYSSPACE);
uio->uio_skip += n;
while (uio->uio_iovcnt &&
uio->uio_skip >= uio->uio_iov->iov_len) {
uio->uio_skip -= uio->uio_iov->iov_len;
uio->uio_iov++;
uio->uio_iovcnt--;
}
}
uio->uio_loffset += n;
uio->uio_resid -= n;
}
EXPORT_SYMBOL(zfs_uioskip);
/*
* Check if the uio is page-aligned in memory.
*/
boolean_t
zfs_uio_page_aligned(zfs_uio_t *uio)
{
boolean_t aligned = B_TRUE;
if (uio->uio_segflg == UIO_SYSSPACE) {
const struct iovec *iov = uio->uio_iov;
size_t skip = uio->uio_skip;
for (int i = uio->uio_iovcnt; i > 0; iov++, i--) {
uintptr_t addr = (uintptr_t)(iov->iov_base + skip);
size_t size = iov->iov_len - skip;
if ((addr & (PAGE_SIZE - 1)) ||
(size & (PAGE_SIZE - 1))) {
aligned = B_FALSE;
break;
}
skip = 0;
}
} else if (uio->uio_segflg == UIO_ITER) {
unsigned long alignment =
iov_iter_alignment(uio->uio_iter);
aligned = IS_P2ALIGNED(alignment, PAGE_SIZE);
} else {
/* Currently not supported */
aligned = B_FALSE;
}
return (aligned);
}
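/*
 * Illustrative sketch, not part of the original source: the per-buffer test
 * zfs_uio_page_aligned() applies above, for a single address/length pair
 * and an assumed 4 KiB page size. Both the start address and the length
 * must be page multiples for the uio to be treated as page-aligned.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define	MODEL_PAGE_SIZE	4096UL	/* assumed page size for the example */

static bool
buffer_page_aligned(const void *base, size_t len)
{
	uintptr_t addr = (uintptr_t)base;

	return ((addr & (MODEL_PAGE_SIZE - 1)) == 0 &&
	    (len & (MODEL_PAGE_SIZE - 1)) == 0);
}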
-
#if defined(HAVE_ZERO_PAGE_GPL_ONLY) || !defined(_LP64)
#define ZFS_MARKED_PAGE 0x0
#define IS_ZFS_MARKED_PAGE(_p) 0
#define zfs_mark_page(_p)
#define zfs_unmark_page(_p)
#define IS_ZERO_PAGE(_p) 0
#else
/*
* Mark pages to know if they were allocated to replace ZERO_PAGE() for
* Direct I/O writes.
*/
#define ZFS_MARKED_PAGE 0x5a465350414745 /* ASCII: ZFSPAGE */
#define IS_ZFS_MARKED_PAGE(_p) \
(page_private(_p) == (unsigned long)ZFS_MARKED_PAGE)
#define IS_ZERO_PAGE(_p) ((_p) == ZERO_PAGE(0))
static inline void
zfs_mark_page(struct page *page)
{
ASSERT3P(page, !=, NULL);
get_page(page);
SetPagePrivate(page);
set_page_private(page, ZFS_MARKED_PAGE);
}
static inline void
zfs_unmark_page(struct page *page)
{
ASSERT3P(page, !=, NULL);
set_page_private(page, 0UL);
ClearPagePrivate(page);
put_page(page);
}
#endif /* HAVE_ZERO_PAGE_GPL_ONLY || !_LP64 */
-#if !defined(HAVE_PIN_USER_PAGES_UNLOCKED)
static void
zfs_uio_dio_check_for_zero_page(zfs_uio_t *uio)
{
ASSERT3P(uio->uio_dio.pages, !=, NULL);
for (long i = 0; i < uio->uio_dio.npages; i++) {
struct page *p = uio->uio_dio.pages[i];
lock_page(p);
if (IS_ZERO_PAGE(p)) {
/*
* If the user page points to the kernel's ZERO_PAGE(), a
* new zero-filled page will be allocated so the
* contents of the page cannot be changed by the user
* while a Direct I/O write is taking place.
*/
gfp_t gfp_zero_page = __GFP_NOWARN | GFP_NOIO |
__GFP_ZERO | GFP_KERNEL;
ASSERT0(IS_ZFS_MARKED_PAGE(p));
unlock_page(p);
put_page(p);
uio->uio_dio.pages[i] =
__page_cache_alloc(gfp_zero_page);
zfs_mark_page(uio->uio_dio.pages[i]);
} else {
unlock_page(p);
}
}
}
-#endif
void
zfs_uio_free_dio_pages(zfs_uio_t *uio, zfs_uio_rw_t rw)
{
ASSERT(uio->uio_extflg & UIO_DIRECT);
ASSERT3P(uio->uio_dio.pages, !=, NULL);
+ if (uio->uio_dio.pinned) {
#if defined(HAVE_PIN_USER_PAGES_UNLOCKED)
- unpin_user_pages(uio->uio_dio.pages, uio->uio_dio.npages);
-#else
- for (long i = 0; i < uio->uio_dio.npages; i++) {
- struct page *p = uio->uio_dio.pages[i];
+ unpin_user_pages(uio->uio_dio.pages, uio->uio_dio.npages);
+#endif
+ } else {
+ for (long i = 0; i < uio->uio_dio.npages; i++) {
+ struct page *p = uio->uio_dio.pages[i];
- if (IS_ZFS_MARKED_PAGE(p)) {
- zfs_unmark_page(p);
- __free_page(p);
- continue;
- }
+ if (IS_ZFS_MARKED_PAGE(p)) {
+ zfs_unmark_page(p);
+ __free_page(p);
+ continue;
+ }
- put_page(p);
+ put_page(p);
+ }
}
-#endif
+
vmem_free(uio->uio_dio.pages,
uio->uio_dio.npages * sizeof (struct page *));
}
#if defined(HAVE_PIN_USER_PAGES_UNLOCKED)
static int
zfs_uio_pin_user_pages(zfs_uio_t *uio, zfs_uio_rw_t rw)
{
long res;
size_t skip = uio->uio_skip;
size_t len = uio->uio_resid - skip;
unsigned int gup_flags = 0;
unsigned long addr;
unsigned long nr_pages;
/*
* Kernel 6.2 introduced the FOLL_PCI_P2PDMA flag. This flag could
* possibly be used here in the future to allow for P2P operations with
* user pages.
*/
if (rw == UIO_READ)
gup_flags = FOLL_WRITE;
if (len == 0)
return (0);
+ uio->uio_dio.pinned = B_TRUE;
#if defined(HAVE_ITER_IS_UBUF)
if (iter_is_ubuf(uio->uio_iter)) {
nr_pages = DIV_ROUND_UP(len, PAGE_SIZE);
addr = (unsigned long)uio->uio_iter->ubuf + skip;
res = pin_user_pages_unlocked(addr, nr_pages,
&uio->uio_dio.pages[uio->uio_dio.npages], gup_flags);
if (res < 0) {
return (SET_ERROR(-res));
} else if (len != (res * PAGE_SIZE)) {
uio->uio_dio.npages += res;
return (SET_ERROR(EFAULT));
}
uio->uio_dio.npages += res;
return (0);
}
#endif
const struct iovec *iovp = zfs_uio_iter_iov(uio->uio_iter);
for (int i = 0; i < uio->uio_iovcnt; i++) {
size_t amt = iovp->iov_len - skip;
if (amt == 0) {
iovp++;
skip = 0;
continue;
}
addr = (unsigned long)iovp->iov_base + skip;
nr_pages = DIV_ROUND_UP(amt, PAGE_SIZE);
res = pin_user_pages_unlocked(addr, nr_pages,
&uio->uio_dio.pages[uio->uio_dio.npages], gup_flags);
if (res < 0) {
return (SET_ERROR(-res));
} else if (amt != (res * PAGE_SIZE)) {
uio->uio_dio.npages += res;
return (SET_ERROR(EFAULT));
}
len -= amt;
uio->uio_dio.npages += res;
skip = 0;
iovp++;
};
ASSERT0(len);
return (0);
}
+#endif
-#else
static int
zfs_uio_get_dio_pages_iov_iter(zfs_uio_t *uio, zfs_uio_rw_t rw)
{
size_t start;
size_t wanted = uio->uio_resid - uio->uio_skip;
ssize_t rollback = 0;
ssize_t cnt;
unsigned maxpages = DIV_ROUND_UP(wanted, PAGE_SIZE);
while (wanted) {
+#if defined(HAVE_IOV_ITER_GET_PAGES2)
+ cnt = iov_iter_get_pages2(uio->uio_iter,
+ &uio->uio_dio.pages[uio->uio_dio.npages],
+ wanted, maxpages, &start);
+#else
cnt = iov_iter_get_pages(uio->uio_iter,
&uio->uio_dio.pages[uio->uio_dio.npages],
wanted, maxpages, &start);
+#endif
if (cnt < 0) {
iov_iter_revert(uio->uio_iter, rollback);
return (SET_ERROR(-cnt));
}
/*
* All Direct I/O operations must be page aligned.
*/
ASSERT(IS_P2ALIGNED(start, PAGE_SIZE));
uio->uio_dio.npages += DIV_ROUND_UP(cnt, PAGE_SIZE);
rollback += cnt;
wanted -= cnt;
+#if !defined(HAVE_IOV_ITER_GET_PAGES2)
+ /*
+ * iov_iter_get_pages2() advances the iov_iter itself on success,
+ * but the older iov_iter_get_pages() does not, so advance it
+ * manually here.
+ */
iov_iter_advance(uio->uio_iter, cnt);
+#endif
}
ASSERT3U(rollback, ==, uio->uio_resid - uio->uio_skip);
iov_iter_revert(uio->uio_iter, rollback);
return (0);
}
-#endif /* HAVE_PIN_USER_PAGES_UNLOCKED */
/*
* This function pins user pages. In the event that the user pages were not
* successfully pinned, an error value is returned.
*
* On success, 0 is returned.
*/
int
zfs_uio_get_dio_pages_alloc(zfs_uio_t *uio, zfs_uio_rw_t rw)
{
int error = 0;
long npages = DIV_ROUND_UP(uio->uio_resid, PAGE_SIZE);
size_t size = npages * sizeof (struct page *);
if (uio->uio_segflg == UIO_ITER) {
uio->uio_dio.pages = vmem_alloc(size, KM_SLEEP);
#if defined(HAVE_PIN_USER_PAGES_UNLOCKED)
- error = zfs_uio_pin_user_pages(uio, rw);
+ if (zfs_user_backed_iov_iter(uio->uio_iter))
+ error = zfs_uio_pin_user_pages(uio, rw);
+ else
+ error = zfs_uio_get_dio_pages_iov_iter(uio, rw);
#else
error = zfs_uio_get_dio_pages_iov_iter(uio, rw);
#endif
} else {
return (SET_ERROR(EOPNOTSUPP));
}
ASSERT3S(uio->uio_dio.npages, >=, 0);
if (error) {
+ if (uio->uio_dio.pinned) {
#if defined(HAVE_PIN_USER_PAGES_UNLOCKED)
- unpin_user_pages(uio->uio_dio.pages, uio->uio_dio.npages);
-#else
- for (long i = 0; i < uio->uio_dio.npages; i++)
- put_page(uio->uio_dio.pages[i]);
+ unpin_user_pages(uio->uio_dio.pages,
+ uio->uio_dio.npages);
#endif
+ } else {
+ for (long i = 0; i < uio->uio_dio.npages; i++)
+ put_page(uio->uio_dio.pages[i]);
+ }
+
vmem_free(uio->uio_dio.pages, size);
return (error);
} else {
ASSERT3S(uio->uio_dio.npages, ==, npages);
}
-#if !defined(HAVE_PIN_USER_PAGES_UNLOCKED)
- if (rw == UIO_WRITE)
+ if (rw == UIO_WRITE && !uio->uio_dio.pinned)
zfs_uio_dio_check_for_zero_page(uio);
-#endif
uio->uio_extflg |= UIO_DIRECT;
return (0);
}
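/*
 * Illustrative sketch, not part of the original source: the sizing
 * arithmetic used by zfs_uio_get_dio_pages_alloc() above, assuming a 4 KiB
 * page size. A residual of "resid" bytes needs DIV_ROUND_UP(resid,
 * PAGE_SIZE) page pointers; for example a 1 MiB request needs 256 entries,
 * or 2 KiB of pointer storage on a 64-bit kernel.
 */
#include <stddef.h>

#define	MODEL_PAGE_SIZE	4096UL	/* assumed page size for the example */

static size_t
dio_page_array_entries(size_t resid)
{
	return ((resid + MODEL_PAGE_SIZE - 1) / MODEL_PAGE_SIZE);
}

static size_t
dio_page_array_bytes(size_t resid)
{
	return (dio_page_array_entries(resid) * sizeof (void *));
}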
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
index fe64bc710387..11438ab61475 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
@@ -1,609 +1,615 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* LLNL-CODE-403049.
* Rewritten for Linux by:
* Rohan Puri <rohan.puri15@gmail.com>
* Brian Behlendorf <behlendorf1@llnl.gov>
*/
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>
#include <sys/dmu.h>
#include <sys/dsl_dataset.h>
#include <sys/zap.h>
/*
* Common open routine. Disallow any write access.
*/
static int
zpl_common_open(struct inode *ip, struct file *filp)
{
if (blk_mode_is_open_write(filp->f_mode))
return (-EACCES);
return (generic_file_open(ip, filp));
}
/*
* Get root directory contents.
*/
static int
zpl_root_iterate(struct file *filp, struct dir_context *ctx)
{
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
int error = 0;
if (zfsvfs->z_show_ctldir == ZFS_SNAPDIR_DISABLED) {
return (SET_ERROR(ENOENT));
}
if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error);
if (!dir_emit_dots(filp, ctx))
goto out;
if (ctx->pos == 2) {
if (!dir_emit(ctx, ZFS_SNAPDIR_NAME,
strlen(ZFS_SNAPDIR_NAME), ZFSCTL_INO_SNAPDIR, DT_DIR))
goto out;
ctx->pos++;
}
if (ctx->pos == 3) {
if (!dir_emit(ctx, ZFS_SHAREDIR_NAME,
strlen(ZFS_SHAREDIR_NAME), ZFSCTL_INO_SHARES, DT_DIR))
goto out;
ctx->pos++;
}
out:
zpl_exit(zfsvfs, FTAG);
return (error);
}
/*
* Get root directory attributes.
*/
static int
#ifdef HAVE_IDMAP_IOPS_GETATTR
zpl_root_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#elif defined(HAVE_USERNS_IOPS_GETATTR)
zpl_root_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_root_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
#endif
{
(void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode;
#if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, ip, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP)
generic_fillattr(user_ns, ip, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP_REQMASK)
generic_fillattr(user_ns, request_mask, ip, stat);
#else
(void) user_ns;
#endif
#else
generic_fillattr(ip, stat);
#endif
stat->atime = current_time(ip);
return (0);
}
ZPL_GETATTR_WRAPPER(zpl_root_getattr);
static struct dentry *
zpl_root_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags)
{
cred_t *cr = CRED();
struct inode *ip;
int error;
crhold(cr);
error = -zfsctl_root_lookup(dip, dname(dentry), &ip, 0, cr, NULL, NULL);
ASSERT3S(error, <=, 0);
crfree(cr);
if (error) {
if (error == -ENOENT)
return (d_splice_alias(NULL, dentry));
else
return (ERR_PTR(error));
}
return (d_splice_alias(ip, dentry));
}
/*
* The '.zfs' control directory file and inode operations.
*/
const struct file_operations zpl_fops_root = {
.open = zpl_common_open,
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = zpl_root_iterate,
};
const struct inode_operations zpl_ops_root = {
.lookup = zpl_root_lookup,
.getattr = zpl_root_getattr,
};
static struct vfsmount *
zpl_snapdir_automount(struct path *path)
{
int error;
error = -zfsctl_snapshot_mount(path, 0);
if (error)
return (ERR_PTR(error));
/*
* Rather than returning the new vfsmount for the snapshot we must
* return NULL to indicate a mount collision. This is done because
* the user space mount calls do_add_mount() which adds the vfsmount
* to the name space. If we returned the new mount here it would be
* added again to the vfsmount list resulting in list corruption.
*/
return (NULL);
}
/*
* Negative dentries must always be revalidated so newly created snapshots
* can be detected and automounted. Normal dentries should be kept because
* as of the 3.18 kernel revalidating the mountpoint dentry will result in
* the snapshot being immediately unmounted.
*/
+#ifdef HAVE_D_REVALIDATE_4ARGS
+static int
+zpl_snapdir_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
+#else
static int
zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags)
+#endif
{
return (!!dentry->d_inode);
}
static dentry_operations_t zpl_dops_snapdirs = {
/*
* Auto mounting of snapshots is only supported for 2.6.37 and
* newer kernels. Prior to this kernel the ops->follow_link()
* callback was used as a hack to trigger the mount. The
* resulting vfsmount was then explicitly grafted in to the
* name space. While it might be possible to add compatibility
* code to accomplish this it would require considerable care.
*/
.d_automount = zpl_snapdir_automount,
.d_revalidate = zpl_snapdir_revalidate,
};
static struct dentry *
zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
unsigned int flags)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
struct inode *ip = NULL;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfsctl_snapdir_lookup(dip, dname(dentry), &ip,
0, cr, NULL, NULL);
ASSERT3S(error, <=, 0);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error && error != -ENOENT)
return (ERR_PTR(error));
ASSERT(error == 0 || ip == NULL);
d_clear_d_op(dentry);
d_set_d_op(dentry, &zpl_dops_snapdirs);
dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
return (d_splice_alias(ip, dentry));
}
static int
zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx)
{
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
fstrans_cookie_t cookie;
char snapname[MAXNAMELEN];
boolean_t case_conflict;
uint64_t id, pos;
int error = 0;
if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error);
cookie = spl_fstrans_mark();
if (!dir_emit_dots(filp, ctx))
goto out;
/* Start the position at 0 if it already emitted . and .. */
pos = (ctx->pos == 2 ? 0 : ctx->pos);
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = -dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN,
snapname, &id, &pos, &case_conflict);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error)
goto out;
if (!dir_emit(ctx, snapname, strlen(snapname),
ZFSCTL_INO_SHARES - id, DT_DIR))
goto out;
ctx->pos = pos;
}
out:
spl_fstrans_unmark(cookie);
zpl_exit(zfsvfs, FTAG);
if (error == -ENOENT)
return (0);
return (error);
}
static int
#ifdef HAVE_IOPS_RENAME_USERNS
zpl_snapdir_rename2(struct user_namespace *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int flags)
#elif defined(HAVE_IOPS_RENAME_IDMAP)
zpl_snapdir_rename2(struct mnt_idmap *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int flags)
#else
zpl_snapdir_rename2(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry, unsigned int flags)
#endif
{
cred_t *cr = CRED();
int error;
/* We probably don't want to support renameat2(2) in ctldir */
if (flags)
return (-EINVAL);
crhold(cr);
error = -zfsctl_snapdir_rename(sdip, dname(sdentry),
tdip, dname(tdentry), cr, 0);
ASSERT3S(error, <=, 0);
crfree(cr);
return (error);
}
#if (!defined(HAVE_RENAME_WANTS_FLAGS) && \
!defined(HAVE_IOPS_RENAME_USERNS) && \
!defined(HAVE_IOPS_RENAME_IDMAP))
static int
zpl_snapdir_rename(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry)
{
return (zpl_snapdir_rename2(sdip, sdentry, tdip, tdentry, 0));
}
#endif
static int
zpl_snapdir_rmdir(struct inode *dip, struct dentry *dentry)
{
cred_t *cr = CRED();
int error;
crhold(cr);
error = -zfsctl_snapdir_remove(dip, dname(dentry), cr, 0);
ASSERT3S(error, <=, 0);
crfree(cr);
return (error);
}
static int
#ifdef HAVE_IOPS_MKDIR_USERNS
zpl_snapdir_mkdir(struct user_namespace *user_ns, struct inode *dip,
struct dentry *dentry, umode_t mode)
#elif defined(HAVE_IOPS_MKDIR_IDMAP)
zpl_snapdir_mkdir(struct mnt_idmap *user_ns, struct inode *dip,
struct dentry *dentry, umode_t mode)
#else
zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
#endif
{
cred_t *cr = CRED();
vattr_t *vap;
struct inode *ip;
int error;
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
#if (defined(HAVE_IOPS_MKDIR_USERNS) || defined(HAVE_IOPS_MKDIR_IDMAP))
zpl_vap_init(vap, dip, mode | S_IFDIR, cr, user_ns);
#else
zpl_vap_init(vap, dip, mode | S_IFDIR, cr, zfs_init_idmap);
#endif
error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0);
if (error == 0) {
d_clear_d_op(dentry);
d_set_d_op(dentry, &zpl_dops_snapdirs);
d_instantiate(dentry, ip);
}
kmem_free(vap, sizeof (vattr_t));
ASSERT3S(error, <=, 0);
crfree(cr);
return (error);
}
/*
* Get snapshot directory attributes.
*/
static int
#ifdef HAVE_IDMAP_IOPS_GETATTR
zpl_snapdir_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#elif defined(HAVE_USERNS_IOPS_GETATTR)
zpl_snapdir_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_snapdir_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
#endif
{
(void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode;
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error);
#if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, ip, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP)
generic_fillattr(user_ns, ip, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP_REQMASK)
generic_fillattr(user_ns, request_mask, ip, stat);
#else
(void) user_ns;
#endif
#else
generic_fillattr(ip, stat);
#endif
stat->nlink = stat->size = 2;
dsl_dataset_t *ds = dmu_objset_ds(zfsvfs->z_os);
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
uint64_t snap_count;
int err = zap_count(
dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (err != 0) {
zpl_exit(zfsvfs, FTAG);
return (-err);
}
stat->nlink += snap_count;
}
stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
stat->atime = current_time(ip);
zpl_exit(zfsvfs, FTAG);
return (0);
}
ZPL_GETATTR_WRAPPER(zpl_snapdir_getattr);
/*
* The '.zfs/snapshot' directory file operations. These mainly control
* generating the list of available snapshots when doing an 'ls' in the
* directory. See zpl_snapdir_readdir().
*/
const struct file_operations zpl_fops_snapdir = {
.open = zpl_common_open,
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = zpl_snapdir_iterate,
};
/*
* The '.zfs/snapshot' directory inode operations. These mainly control
* creating an inode for a snapshot directory and initializing the needed
* infrastructure to automount the snapshot. See zpl_snapdir_lookup().
*/
const struct inode_operations zpl_ops_snapdir = {
.lookup = zpl_snapdir_lookup,
.getattr = zpl_snapdir_getattr,
#if (defined(HAVE_RENAME_WANTS_FLAGS) || \
defined(HAVE_IOPS_RENAME_USERNS) || \
defined(HAVE_IOPS_RENAME_IDMAP))
.rename = zpl_snapdir_rename2,
#else
.rename = zpl_snapdir_rename,
#endif
.rmdir = zpl_snapdir_rmdir,
.mkdir = zpl_snapdir_mkdir,
};
static struct dentry *
zpl_shares_lookup(struct inode *dip, struct dentry *dentry,
unsigned int flags)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
struct inode *ip = NULL;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfsctl_shares_lookup(dip, dname(dentry), &ip,
0, cr, NULL, NULL);
ASSERT3S(error, <=, 0);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error) {
if (error == -ENOENT)
return (d_splice_alias(NULL, dentry));
else
return (ERR_PTR(error));
}
return (d_splice_alias(ip, dentry));
}
static int
zpl_shares_iterate(struct file *filp, struct dir_context *ctx)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
znode_t *dzp;
int error = 0;
if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error);
cookie = spl_fstrans_mark();
if (zfsvfs->z_shares_dir == 0) {
dir_emit_dots(filp, ctx);
goto out;
}
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error)
goto out;
crhold(cr);
error = -zfs_readdir(ZTOI(dzp), ctx, cr);
crfree(cr);
iput(ZTOI(dzp));
out:
spl_fstrans_unmark(cookie);
zpl_exit(zfsvfs, FTAG);
ASSERT3S(error, <=, 0);
return (error);
}
static int
#ifdef HAVE_USERNS_IOPS_GETATTR
zpl_shares_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#elif defined(HAVE_IDMAP_IOPS_GETATTR)
zpl_shares_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
#endif
{
(void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode;
zfsvfs_t *zfsvfs = ITOZSB(ip);
znode_t *dzp;
int error;
if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error);
if (zfsvfs->z_shares_dir == 0) {
#if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, path->dentry->d_inode, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP)
generic_fillattr(user_ns, path->dentry->d_inode, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP_REQMASK)
generic_fillattr(user_ns, request_mask, ip, stat);
#else
(void) user_ns;
#endif
#else
generic_fillattr(path->dentry->d_inode, stat);
#endif
stat->nlink = stat->size = 2;
stat->atime = current_time(ip);
zpl_exit(zfsvfs, FTAG);
return (0);
}
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error == 0) {
#ifdef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK
error = -zfs_getattr_fast(user_ns, request_mask, ZTOI(dzp),
stat);
#elif (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
error = -zfs_getattr_fast(user_ns, ZTOI(dzp), stat);
#else
error = -zfs_getattr_fast(kcred->user_ns, ZTOI(dzp), stat);
#endif
iput(ZTOI(dzp));
}
zpl_exit(zfsvfs, FTAG);
ASSERT3S(error, <=, 0);
return (error);
}
ZPL_GETATTR_WRAPPER(zpl_shares_getattr);
/*
* The '.zfs/shares' directory file operations.
*/
const struct file_operations zpl_fops_shares = {
.open = zpl_common_open,
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = zpl_shares_iterate,
};
/*
* The '.zfs/shares' directory inode operations.
*/
const struct inode_operations zpl_ops_shares = {
.lookup = zpl_shares_lookup,
.getattr = zpl_shares_getattr,
};
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index 7c9aae6a66af..7c5d567c3239 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -1,1940 +1,1949 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2024, Rob Norris <robn@despairlabs.com>
* Copyright (c) 2024, Klara, Inc.
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include <cityhash.h>
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/workqueue.h>
#include <linux/blk-mq.h>
static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
struct request *rq, boolean_t force_sync);
static unsigned int zvol_major = ZVOL_MAJOR;
static unsigned int zvol_request_sync = 0;
static unsigned int zvol_prefetch_bytes = (128 * 1024);
static unsigned long zvol_max_discard_blocks = 16384;
/*
* Switch taskq at multiple of 512 MB offset. This can be set to a lower value
* to utilize more threads for small files but may affect prefetch hits.
*/
#define ZVOL_TASKQ_OFFSET_SHIFT 29
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
static unsigned int zvol_open_timeout_ms = 1000;
#endif
static unsigned int zvol_threads = 0;
static unsigned int zvol_blk_mq_threads = 0;
static unsigned int zvol_blk_mq_actual_threads;
static boolean_t zvol_use_blk_mq = B_FALSE;
/*
* The maximum number of volblocksize blocks to process per thread. Typically,
* write heavy workloads perform better with higher values here, and read
* heavy workloads perform better with lower values, but that's not a hard
* and fast rule. It's basically a knob to tune between "less overhead with
* less parallelism" and "more overhead, but more parallelism".
*
* '8' was chosen as a reasonable, balanced, default based off of sequential
* read and write tests to a zvol in an NVMe pool (with 16 CPUs).
*/
static unsigned int zvol_blk_mq_blocks_per_thread = 8;
static unsigned int zvol_num_taskqs = 0;
#ifndef BLKDEV_DEFAULT_RQ
/* BLKDEV_MAX_RQ was renamed to BLKDEV_DEFAULT_RQ in the 5.16 kernel */
#define BLKDEV_DEFAULT_RQ BLKDEV_MAX_RQ
#endif
/*
* Finalize our BIO or request.
*/
static inline void
zvol_end_io(struct bio *bio, struct request *rq, int error)
{
if (bio) {
bio->bi_status = errno_to_bi_status(-error);
bio_endio(bio);
} else {
blk_mq_end_request(rq, errno_to_bi_status(error));
}
}
static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
static unsigned int zvol_actual_blk_mq_queue_depth;
struct zvol_state_os {
struct gendisk *zvo_disk; /* generic disk */
struct request_queue *zvo_queue; /* request queue */
dev_t zvo_dev; /* device id */
struct blk_mq_tag_set tag_set;
/* Set from the global 'zvol_use_blk_mq' at zvol load */
boolean_t use_blk_mq;
};
typedef struct zv_taskq {
uint_t tqs_cnt;
taskq_t **tqs_taskq;
} zv_taskq_t;
static zv_taskq_t zvol_taskqs;
static struct ida zvol_ida;
typedef struct zv_request_stack {
zvol_state_t *zv;
struct bio *bio;
struct request *rq;
} zv_request_t;
typedef struct zv_work {
struct request *rq;
struct work_struct work;
} zv_work_t;
typedef struct zv_request_task {
zv_request_t zvr;
taskq_ent_t ent;
} zv_request_task_t;
static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
zv_request_task_t *task;
task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
taskq_init_ent(&task->ent);
task->zvr = zvr;
return (task);
}
static void
zv_request_task_free(zv_request_task_t *task)
{
kmem_free(task, sizeof (*task));
}
/*
* This is called when a new block multiqueue request comes in. A request
* contains one or more BIOs.
*/
static blk_status_t zvol_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
zvol_state_t *zv = rq->q->queuedata;
/* Tell the kernel that we are starting to process this request */
blk_mq_start_request(rq);
if (blk_rq_is_passthrough(rq)) {
/* Skip non filesystem request */
blk_mq_end_request(rq, BLK_STS_IOERR);
return (BLK_STS_IOERR);
}
zvol_request_impl(zv, NULL, rq, 0);
/* Acknowledge to the kernel that we got this request */
return (BLK_STS_OK);
}
static struct blk_mq_ops zvol_blk_mq_queue_ops = {
.queue_rq = zvol_mq_queue_rq,
};
/* Initialize our blk-mq struct */
static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv)
{
struct zvol_state_os *zso = zv->zv_zso;
memset(&zso->tag_set, 0, sizeof (zso->tag_set));
/* Initialize tag set. */
zso->tag_set.ops = &zvol_blk_mq_queue_ops;
zso->tag_set.nr_hw_queues = zvol_blk_mq_actual_threads;
zso->tag_set.queue_depth = zvol_actual_blk_mq_queue_depth;
zso->tag_set.numa_node = NUMA_NO_NODE;
zso->tag_set.cmd_size = 0;
/*
* We need BLK_MQ_F_BLOCKING here since we do blocking calls in
* zvol_request_impl()
*/
- zso->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ zso->tag_set.flags = BLK_MQ_F_BLOCKING;
+
+#ifdef BLK_MQ_F_SHOULD_MERGE
+ /*
+ * Linux 6.14 removed BLK_MQ_F_SHOULD_MERGE and made it implicit.
+ * For older kernels, we set it.
+ */
+ zso->tag_set.flags |= BLK_MQ_F_SHOULD_MERGE;
+#endif
+
zso->tag_set.driver_data = zv;
return (blk_mq_alloc_tag_set(&zso->tag_set));
}
/*
* Given a path, return TRUE if path is a ZVOL.
*/
boolean_t
zvol_os_is_zvol(const char *path)
{
dev_t dev = 0;
if (vdev_lookup_bdev(path, &dev) != 0)
return (B_FALSE);
if (MAJOR(dev) == zvol_major)
return (B_TRUE);
return (B_FALSE);
}
static void
zvol_write(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
struct request *rq = zvr->rq;
int error = 0;
zfs_uio_t uio;
zvol_state_t *zv = zvr->zv;
struct request_queue *q;
struct gendisk *disk;
unsigned long start_time = 0;
boolean_t acct = B_FALSE;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
q = zv->zv_zso->zvo_queue;
disk = zv->zv_zso->zvo_disk;
/* bio marked as FLUSH need to flush before write */
if (io_is_flush(bio, rq))
zil_commit(zv->zv_zilog, ZVOL_OBJ);
/* Some requests are just for flush and nothing else. */
if (io_size(bio, rq) == 0) {
rw_exit(&zv->zv_suspend_lock);
zvol_end_io(bio, rq, 0);
return;
}
zfs_uio_bvec_init(&uio, bio, rq);
ssize_t start_resid = uio.uio_resid;
/*
* With use_blk_mq, accounting is done by blk_mq_start_request()
* and blk_mq_end_request(), so we can skip it here.
*/
if (bio) {
acct = blk_queue_io_stat(q);
if (acct) {
start_time = blk_generic_start_io_acct(q, disk, WRITE,
bio);
}
}
boolean_t sync =
io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_WRITER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
uint64_t off = uio.uio_loffset;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* don't write past the end */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
/* This will only fail for ENOSPC */
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0) {
zvol_log_write(zv, tx, off, bytes, sync);
}
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - uio.uio_resid;
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
task_io_account_write(nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
if (bio && acct) {
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
}
zvol_end_io(bio, rq, -error);
}
static void
zvol_write_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_write(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_discard(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
struct request *rq = zvr->rq;
zvol_state_t *zv = zvr->zv;
uint64_t start = io_offset(bio, rq);
uint64_t size = io_size(bio, rq);
uint64_t end = start + size;
boolean_t sync;
int error = 0;
dmu_tx_t *tx;
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
unsigned long start_time = 0;
boolean_t acct = B_FALSE;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
if (bio) {
acct = blk_queue_io_stat(q);
if (acct) {
start_time = blk_generic_start_io_acct(q, disk, WRITE,
bio);
}
}
sync = io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
if (end > zv->zv_volsize) {
error = SET_ERROR(EIO);
goto unlock;
}
/*
* Align the request to volume block boundaries when a secure erase is
* not required. This will prevent dnode_free_range() from zeroing out
* the unaligned parts which is slow (read-modify-write) and useless
* since we are not freeing any space by doing so.
*/
if (!io_is_secure_erase(bio, rq)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN_TYPED(end, zv->zv_volblocksize, uint64_t);
size = end - start;
}
if (start >= end)
goto unlock;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, start, size);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset,
ZVOL_OBJ, start, size);
}
zfs_rangelock_exit(lr);
if (error == 0 && sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
unlock:
rw_exit(&zv->zv_suspend_lock);
if (bio && acct) {
blk_generic_end_io_acct(q, disk, WRITE, bio,
start_time);
}
zvol_end_io(bio, rq, -error);
}
static void
zvol_discard_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_discard(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_read(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
struct request *rq = zvr->rq;
int error = 0;
zfs_uio_t uio;
boolean_t acct = B_FALSE;
zvol_state_t *zv = zvr->zv;
struct request_queue *q;
struct gendisk *disk;
unsigned long start_time = 0;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
zfs_uio_bvec_init(&uio, bio, rq);
q = zv->zv_zso->zvo_queue;
disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
/*
* When blk-mq is being used, accounting is done by
* blk_mq_start_request() and blk_mq_end_request().
*/
if (bio) {
acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, READ,
bio);
}
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_READER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
/* don't read past the end */
if (bytes > volsize - uio.uio_loffset)
bytes = volsize - uio.uio_loffset;
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - uio.uio_resid;
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
task_io_account_read(nread);
rw_exit(&zv->zv_suspend_lock);
if (bio && acct) {
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
}
zvol_end_io(bio, rq, -error);
}
static void
zvol_read_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_read(&task->zvr);
zv_request_task_free(task);
}
/*
* Process a BIO or request
*
* Either 'bio' or 'rq' should be set depending on whether we are processing
* a bio or a request (they must not both be set).
*
* force_sync: Set to 0 to defer processing to a background taskq
* Set to 1 to process data synchronously
*/
static void
zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
boolean_t force_sync)
{
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = io_offset(bio, rq);
uint64_t size = io_size(bio, rq);
int rw = io_data_dir(bio, rq);
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
zvol_end_io(bio, rq, -SET_ERROR(ENXIO));
goto out;
}
if (zvol_request_sync || zv->zv_threading == B_FALSE)
force_sync = 1;
zv_request_t zvr = {
.zv = zv,
.bio = bio,
.rq = rq,
};
if (io_has_data(bio, rq) && offset + size > zv->zv_volsize) {
printk(KERN_INFO "%s: bad access: offset=%llu, size=%lu\n",
zv->zv_zso->zvo_disk->disk_name,
(long long unsigned)offset,
(long unsigned)size);
zvol_end_io(bio, rq, -SET_ERROR(EIO));
goto out;
}
zv_request_task_t *task;
zv_taskq_t *ztqs = &zvol_taskqs;
uint_t blk_mq_hw_queue = 0;
uint_t tq_idx;
uint_t taskq_hash;
if (rq)
#ifdef HAVE_BLK_MQ_RQ_HCTX
blk_mq_hw_queue = rq->mq_hctx->queue_num;
#else
blk_mq_hw_queue =
rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
#endif
taskq_hash = cityhash3((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
blk_mq_hw_queue);
tq_idx = taskq_hash % ztqs->tqs_cnt;
if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
zvol_end_io(bio, rq, -SET_ERROR(EROFS));
goto out;
}
/*
* Prevents the zvol from being suspended, or the ZIL being
* concurrently opened. Will be released after the i/o
* completes.
*/
rw_enter(&zv->zv_suspend_lock, RW_READER);
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data, &zv->zv_kstat.dk_zil_sums);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_create_minor */
VERIFY0((zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED));
}
rw_downgrade(&zv->zv_suspend_lock);
}
/*
* We don't want this thread to be blocked waiting for i/o to
* complete, so we instead wait from a taskq callback. The
* i/o may be a ZIL write (via zil_commit()), or a read of an
* indirect block, or a read of a data block (if this is a
* partial-block write). We will indicate that the i/o is
* complete by calling END_IO() from the taskq callback.
*
* This design allows the calling thread to continue and
* initiate more concurrent operations by calling
* zvol_request() again. There are typically only a small
* number of threads available to call zvol_request() (e.g.
* one per iSCSI target), so keeping the latency of
* zvol_request() low is important for performance.
*
* The zvol_request_sync module parameter allows this
* behavior to be altered, for performance evaluation
* purposes. If the callback blocks, setting
* zvol_request_sync=1 will result in much worse performance.
*
* We can have up to zvol_threads concurrent i/o's being
* processed for all zvols on the system. This is typically
* a vast improvement over the zvol_request_sync=1 behavior
* of one i/o at a time per zvol. However, an even better
* design would be for zvol_request() to initiate the zio
* directly, and then be notified by the zio_done callback,
* which would call END_IO(). Unfortunately, the DMU/ZIL
* interfaces lack this functionality (they block waiting for
* the i/o to complete).
*/
if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
if (force_sync) {
zvol_discard(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
zvol_discard_task, task, 0, &task->ent);
}
} else {
if (force_sync) {
zvol_write(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
zvol_write_task, task, 0, &task->ent);
}
}
} else {
/*
* The SCST driver, and possibly others, may issue READ I/Os
* with a length of zero bytes. These empty I/Os contain no
* data and require no additional handling.
*/
if (size == 0) {
zvol_end_io(bio, rq, 0);
goto out;
}
rw_enter(&zv->zv_suspend_lock, RW_READER);
/* See comment in WRITE case above. */
if (force_sync) {
zvol_read(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
zvol_read_task, task, 0, &task->ent);
}
}
out:
spl_fstrans_unmark(cookie);
}
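/*
 * Illustrative sketch, not part of the original source: how the taskq
 * selection in zvol_request_impl() above buckets I/O. Offsets are grouped
 * into 512 MB regions (offset >> ZVOL_TASKQ_OFFSET_SHIFT, with the shift
 * being 29), so requests to the same zvol, region and hardware queue land
 * on the same taskq. "mix3" below is only a stand-in for cityhash3(); any
 * 3-input hash with a reasonable distribution illustrates the same point.
 */
#include <stdint.h>

#define	MODEL_TASKQ_OFFSET_SHIFT	29	/* 512 MB regions */

static uint64_t
mix3(uint64_t a, uint64_t b, uint64_t c)
{
	/* Simple 3-input mix, a placeholder for cityhash3(). */
	uint64_t h = a * 0x9e3779b97f4a7c15ULL;

	h ^= b + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
	return (h);
}

static unsigned int
pick_taskq(uintptr_t zv, uint64_t offset, unsigned int hw_queue,
    unsigned int tqs_cnt)
{
	uint64_t region = offset >> MODEL_TASKQ_OFFSET_SHIFT;

	return ((unsigned int)(mix3((uint64_t)zv, region, hw_queue) %
	    tqs_cnt));
}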
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
static void
zvol_submit_bio(struct bio *bio)
#else
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#endif
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
zvol_state_t *zv = q->queuedata;
zvol_request_impl(zv, bio, NULL, 0);
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
!defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
return (BLK_QC_T_NONE);
#endif
}
static int
#ifdef HAVE_BLK_MODE_T
zvol_open(struct gendisk *disk, blk_mode_t flag)
#else
zvol_open(struct block_device *bdev, fmode_t flag)
#endif
{
zvol_state_t *zv;
int error = 0;
boolean_t drop_suspend = B_FALSE;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
hrtime_t start = gethrtime();
retry:
#endif
rw_enter(&zvol_state_lock, RW_READER);
/*
* Obtain a copy of private_data under the zvol_state_lock to make
* sure that either the result of zvol free code path setting
* disk->private_data to NULL is observed, or zvol_os_free()
* is not called on this zv because of the positive zv_open_count.
*/
#ifdef HAVE_BLK_MODE_T
zv = disk->private_data;
#else
zv = bdev->bd_disk->private_data;
#endif
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
/*
* Make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
} else {
drop_suspend = B_TRUE;
}
} else {
drop_suspend = B_TRUE;
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
boolean_t drop_namespace = B_FALSE;
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
/*
* In all other call paths the spa_namespace_lock is taken
* before the bdev->bd_mutex lock. However, on open(2)
* the __blkdev_get() function calls fops->open() with the
* bdev->bd_mutex lock held. This can result in a deadlock
* when zvols from one pool are used as vdevs in another.
*
* To prevent a lock inversion deadlock we preemptively
* take the spa_namespace_lock. Normally the lock will not
* be contended and this is safe because spa_open_common()
* handles the case where the caller already holds the
* spa_namespace_lock.
*
* When the lock cannot be acquired after multiple retries
* this must be the vdev on zvol deadlock case and we have
* no choice but to return an error. For 5.12 and older
* kernels returning -ERESTARTSYS will result in the
* bdev->bd_mutex being dropped, then reacquired, and
* fops->open() being called again. This process can be
* repeated safely until both locks are acquired. For 5.13
* and newer the -ERESTARTSYS retry logic was removed from
* the kernel so the only option is to return the error for
* the caller to handle it.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
schedule();
return (-SET_ERROR(ERESTARTSYS));
#else
if ((gethrtime() - start) > timeout)
return (-SET_ERROR(ERESTARTSYS));
schedule_timeout_interruptible(
MSEC_TO_TICK(10));
goto retry;
#endif
} else {
drop_namespace = B_TRUE;
}
}
error = -zvol_first_open(zv, !(blk_mode_is_open_write(flag)));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
}
if (error == 0) {
if ((blk_mode_is_open_write(flag)) &&
(zv->zv_flags & ZVOL_RDONLY)) {
if (zv->zv_open_count == 0)
zvol_last_close(zv);
error = -SET_ERROR(EROFS);
} else {
zv->zv_open_count++;
}
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
if (error == 0)
#ifdef HAVE_BLK_MODE_T
disk_check_media_change(disk);
#else
zfs_check_media_change(bdev);
#endif
return (error);
}
static void
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
zvol_release(struct gendisk *disk)
#else
zvol_release(struct gendisk *disk, fmode_t unused)
#endif
{
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
(void) unused;
#endif
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, RW_READER);
zv = disk->private_data;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zv->zv_open_count--;
if (zv->zv_open_count == 0) {
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
case BLKFLSBUF:
#ifdef HAVE_FSYNC_BDEV
fsync_bdev(bdev);
#elif defined(HAVE_SYNC_BLOCKDEV)
sync_blockdev(bdev);
#else
#error "Neither fsync_bdev() nor sync_blockdev() found"
#endif
invalidate_bdev(bdev);
rw_enter(&zv->zv_suspend_lock, RW_READER);
if (!(zv->zv_flags & ZVOL_RDONLY))
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
rw_exit(&zv->zv_suspend_lock);
break;
case BLKZNAME:
mutex_enter(&zv->zv_state_lock);
error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
mutex_exit(&zv->zv_state_lock);
break;
default:
error = -ENOTTY;
break;
}
return (SET_ERROR(error));
}
#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define zvol_compat_ioctl NULL
#endif
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
zv->zv_changed = 0;
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
zv->zv_volsize >> SECTOR_BITS);
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (0);
}
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
struct gendisk *disk = zv->zv_zso->zvo_disk;
#if defined(HAVE_REVALIDATE_DISK_SIZE)
revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
revalidate_disk(disk);
#else
zvol_revalidate_disk(disk);
#endif
return (0);
}
void
zvol_os_clear_private(zvol_state_t *zv)
{
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zv->zv_zso->zvo_disk->private_data = NULL;
}
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
* tiny devices. For devices over 1 MiB a standard head and sector count
* is used to keep the cylinders count reasonable.
*/
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
if (sectors > 2048) {
geo->heads = 16;
geo->sectors = 63;
} else {
geo->heads = 2;
geo->sectors = 4;
}
geo->start = 0;
geo->cylinders = sectors / (geo->heads * geo->sectors);
return (0);
}
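/*
 * Illustrative sketch, not part of the original source: the geometry
 * zvol_getgeo() above reports for two capacities, assuming 512-byte
 * sectors. A 1 GiB zvol has 2097152 sectors and reports heads=16,
 * sectors=63, cylinders = 2097152 / (16 * 63) = 2080; a 512 KiB zvol has
 * 1024 sectors and reports heads=2, sectors=4, cylinders = 128.
 */
#include <stdint.h>

struct model_geometry {
	uint32_t heads;
	uint32_t sectors;
	uint64_t cylinders;
};

static struct model_geometry
model_getgeo(uint64_t nsectors)
{
	struct model_geometry g;

	if (nsectors > 2048) {		/* larger than 1 MiB */
		g.heads = 16;
		g.sectors = 63;
	} else {
		g.heads = 2;
		g.sectors = 4;
	}
	g.cylinders = nsectors / (g.heads * g.sectors);
	return (g);
}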
/*
* Why have two separate block_device_operations structs?
*
* Normally we'd just have one, and assign 'submit_bio' as needed. However,
* it's possible the user's kernel is built with CONSTIFY_PLUGIN, meaning we
* can't just change submit_bio dynamically at runtime. So just create two
* separate structs to get around this.
*/
static const struct block_device_operations zvol_ops_blk_mq = {
.open = zvol_open,
.release = zvol_release,
.ioctl = zvol_ioctl,
.compat_ioctl = zvol_compat_ioctl,
.check_events = zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
.revalidate_disk = zvol_revalidate_disk,
#endif
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
};
static const struct block_device_operations zvol_ops = {
.open = zvol_open,
.release = zvol_release,
.ioctl = zvol_ioctl,
.compat_ioctl = zvol_compat_ioctl,
.check_events = zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
.revalidate_disk = zvol_revalidate_disk,
#endif
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
.submit_bio = zvol_submit_bio,
#endif
};
/*
* Since 6.9, Linux has been removing queue limit setters in favour of an
* initial queue_limits struct applied when the device is open. Since 6.11,
* queue_limits is being extended to allow more things to be applied when the
* device is open. Setters are also being removed for this.
*
* For OpenZFS, this means that depending on kernel version, some options may
* be set up before the device is open, and some applied to an open device
* (queue) after the fact.
*
* We manage this complexity by having our own limits struct,
* zvol_queue_limits_t, in which we carry any queue config that we're
* interested in setting. This structure is the same on all kernels.
*
* These limits are then applied to the queue at device open time by the most
* appropriate method for the kernel.
*
* zvol_queue_limits_convert() is used on 6.9+ (where the two-arg form of
* blk_alloc_disk() exists). This converts our limits struct to a proper Linux
* struct queue_limits, and passes it in. Any fields added in later kernels are
* (obviously) not set up here.
*
* zvol_queue_limits_apply() is called on all kernel versions after the queue
* is created, and applies any remaining config. Before 6.9 that will be
* everything, via setter methods. After 6.9 that will be whatever couldn't be
* put into struct queue_limits. (This implies that zvol_queue_limits_apply()
* will always be a no-op on the latest kernel we support).
*/
typedef struct zvol_queue_limits {
unsigned int zql_max_hw_sectors;
unsigned short zql_max_segments;
unsigned int zql_max_segment_size;
unsigned int zql_io_opt;
unsigned int zql_physical_block_size;
unsigned int zql_max_discard_sectors;
unsigned int zql_discard_granularity;
} zvol_queue_limits_t;
static void
zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
boolean_t use_blk_mq)
{
limits->zql_max_hw_sectors = (DMU_MAX_ACCESS / 4) >> 9;
if (use_blk_mq) {
/*
* IO requests can be really big (1MB). When an IO request
* comes in, it is passed off to zvol_read() or zvol_write()
* in a new thread, where it is chunked up into 'volblocksize'
* sized pieces and processed. So for example, if the request
* is a 1MB write and your volblocksize is 128k, one zvol_write
* thread will take that request and sequentially do ten 128k
* IOs. This is due to the fact that the thread needs to lock
* each volblocksize sized block. So you might be wondering:
* "instead of passing the whole 1MB request to one thread,
* why not pass ten individual 128k chunks to ten threads and
* process the whole write in parallel?" The short answer is
* that there's a sweet spot number of chunks that balances
* the greater parallelism with the added overhead of more
* threads. The sweet spot can be different depending on if you
* have a read or write heavy workload. Writes typically want
* high chunk counts while reads typically want lower ones. On
* a test pool with 6 NVMe drives in a 3x 2-disk mirror
* configuration, with volblocksize=8k, the sweet spot for good
* sequential reads and writes was at 8 chunks.
*/
/*
* Below we tell the kernel how big we want our requests
* to be. You would think that blk_queue_io_opt() would be
* used to do this since it is used to "set optimal request
* size for the queue", but that doesn't seem to do
* anything - the kernel still gives you huge requests
* with tons of little PAGE_SIZE segments contained within them.
*
* Knowing that the kernel will just give you PAGE_SIZE segments
* no matter what, you can say "ok, I want PAGE_SIZE byte
* segments, and I want 'N' of them per request", where N is
* the correct number of segments for the volblocksize and
* number of chunks you want.
*/
if (zvol_blk_mq_blocks_per_thread != 0) {
unsigned int chunks;
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
limits->zql_max_segment_size = PAGE_SIZE;
limits->zql_max_segments =
(zv->zv_volblocksize * chunks) / PAGE_SIZE;
} else {
/*
* Special case: zvol_blk_mq_blocks_per_thread = 0
* Max everything out.
*/
limits->zql_max_segments = UINT16_MAX;
limits->zql_max_segment_size = UINT_MAX;
}
} else {
limits->zql_max_segments = UINT16_MAX;
limits->zql_max_segment_size = UINT_MAX;
}
limits->zql_io_opt = DMU_MAX_ACCESS / 2;
limits->zql_physical_block_size = zv->zv_volblocksize;
limits->zql_max_discard_sectors =
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9;
limits->zql_discard_granularity = zv->zv_volblocksize;
}
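/*
* Worked example of the blk-mq sizing above (editorial illustration only):
* with volblocksize = 128K, zvol_blk_mq_blocks_per_thread = 8 and a 4K
* PAGE_SIZE, zql_max_segment_size is 4K and zql_max_segments is
* (128K * 8) / 4K = 256, i.e. the kernel is asked to cap each request at
* 256 * 4K = 1MB, which one zvol thread then processes as eight 128K blocks.
*/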
#ifdef HAVE_BLK_ALLOC_DISK_2ARG
static void
zvol_queue_limits_convert(zvol_queue_limits_t *limits,
struct queue_limits *qlimits)
{
memset(qlimits, 0, sizeof (struct queue_limits));
qlimits->max_hw_sectors = limits->zql_max_hw_sectors;
qlimits->max_segments = limits->zql_max_segments;
qlimits->max_segment_size = limits->zql_max_segment_size;
qlimits->io_opt = limits->zql_io_opt;
qlimits->physical_block_size = limits->zql_physical_block_size;
qlimits->max_discard_sectors = limits->zql_max_discard_sectors;
qlimits->max_hw_discard_sectors = limits->zql_max_discard_sectors;
qlimits->discard_granularity = limits->zql_discard_granularity;
#ifdef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
qlimits->features =
BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_IO_STAT;
#endif
}
#endif
static void
zvol_queue_limits_apply(zvol_queue_limits_t *limits,
struct request_queue *queue)
{
#ifndef HAVE_BLK_ALLOC_DISK_2ARG
blk_queue_max_hw_sectors(queue, limits->zql_max_hw_sectors);
blk_queue_max_segments(queue, limits->zql_max_segments);
blk_queue_max_segment_size(queue, limits->zql_max_segment_size);
blk_queue_io_opt(queue, limits->zql_io_opt);
blk_queue_physical_block_size(queue, limits->zql_physical_block_size);
blk_queue_max_discard_sectors(queue, limits->zql_max_discard_sectors);
blk_queue_discard_granularity(queue, limits->zql_discard_granularity);
#endif
#ifndef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
blk_queue_set_write_cache(queue, B_TRUE);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, queue);
#endif
}
static int
zvol_alloc_non_blk_mq(struct zvol_state_os *zso, zvol_queue_limits_t *limits)
{
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
#if defined(HAVE_BLK_ALLOC_DISK)
zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
if (zso->zvo_disk == NULL)
return (1);
zso->zvo_disk->minors = ZVOL_MINORS;
zso->zvo_queue = zso->zvo_disk->queue;
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
struct queue_limits qlimits;
zvol_queue_limits_convert(limits, &qlimits);
struct gendisk *disk = blk_alloc_disk(&qlimits, NUMA_NO_NODE);
if (IS_ERR(disk)) {
zso->zvo_disk = NULL;
return (1);
}
zso->zvo_disk = disk;
zso->zvo_disk->minors = ZVOL_MINORS;
zso->zvo_queue = zso->zvo_disk->queue;
#else
zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
if (zso->zvo_queue == NULL)
return (1);
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
return (1);
}
zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_BLK_ALLOC_DISK */
#else
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
if (zso->zvo_queue == NULL)
return (1);
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
return (1);
}
zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
zvol_queue_limits_apply(limits, zso->zvo_queue);
return (0);
}
static int
zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
{
struct zvol_state_os *zso = zv->zv_zso;
/* Allocate our blk-mq tag_set */
if (zvol_blk_mq_alloc_tag_set(zv) != 0)
return (1);
#if defined(HAVE_BLK_ALLOC_DISK)
zso->zvo_disk = blk_mq_alloc_disk(&zso->tag_set, zv);
if (zso->zvo_disk == NULL) {
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
zso->zvo_queue = zso->zvo_disk->queue;
zso->zvo_disk->minors = ZVOL_MINORS;
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
struct queue_limits qlimits;
zvol_queue_limits_convert(limits, &qlimits);
struct gendisk *disk = blk_mq_alloc_disk(&zso->tag_set, &qlimits, zv);
if (IS_ERR(disk)) {
zso->zvo_disk = NULL;
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
zso->zvo_disk = disk;
zso->zvo_queue = zso->zvo_disk->queue;
zso->zvo_disk->minors = ZVOL_MINORS;
#else
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
/* Allocate queue */
zso->zvo_queue = blk_mq_init_queue(&zso->tag_set);
if (IS_ERR(zso->zvo_queue)) {
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
/* Our queue is now created, assign it to our disk */
zso->zvo_disk->queue = zso->zvo_queue;
#endif
zvol_queue_limits_apply(limits, zso->zvo_queue);
return (0);
}
/*
* Allocate memory for a new zvol_state_t and setup the required
* request queue and generic disk structures for the block device.
*/
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name, uint64_t volblocksize)
{
zvol_state_t *zv;
struct zvol_state_os *zso;
uint64_t volmode;
int ret;
if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
return (NULL);
if (volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
if (volmode == ZFS_VOLMODE_NONE)
return (NULL);
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_zso = zso;
zv->zv_volmode = volmode;
zv->zv_volblocksize = volblocksize;
list_link_init(&zv->zv_next);
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&zv->zv_removing_cv, NULL, CV_DEFAULT, NULL);
zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
zvol_queue_limits_t limits;
zvol_queue_limits_init(&limits, zv, zv->zv_zso->use_blk_mq);
/*
* The block layer has 3 interfaces for getting BIOs:
*
* 1. blk-mq request queues (new)
* 2. submit_bio() (oldest)
* 3. regular request queues (old).
*
* Each of those interfaces has two permutations:
*
* a) We have blk_alloc_disk()/blk_mq_alloc_disk(), which allocates
* both the disk and its queue (5.14 kernel or newer)
*
* b) We don't have blk_*alloc_disk(), and have to allocate the
* disk and the queue separately. (5.13 kernel or older)
*/
if (zv->zv_zso->use_blk_mq) {
ret = zvol_alloc_blk_mq(zv, &limits);
zso->zvo_disk->fops = &zvol_ops_blk_mq;
} else {
ret = zvol_alloc_non_blk_mq(zso, &limits);
zso->zvo_disk->fops = &zvol_ops;
}
if (ret != 0)
goto out_kmem;
/* Limit read-ahead to a single page to prevent over-prefetching. */
blk_queue_set_read_ahead(zso->zvo_queue, 1);
if (!zv->zv_zso->use_blk_mq) {
/* Disable write merging in favor of the ZIO pipeline. */
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
}
zso->zvo_queue->queuedata = zv;
zso->zvo_dev = dev;
zv->zv_open_count = 0;
strlcpy(zv->zv_name, name, sizeof (zv->zv_name));
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zso->zvo_disk->major = zvol_major;
zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
/*
* Setting ZFS_VOLMODE_DEV disables partitioning on ZVOL devices.
* This is accomplished by limiting the number of minors for the
* device to one and explicitly disabling partition scanning.
*/
if (volmode == ZFS_VOLMODE_DEV) {
zso->zvo_disk->minors = 1;
zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
zso->zvo_disk->flags |= GENHD_FL_NO_PART;
}
zso->zvo_disk->first_minor = (dev & MINORMASK);
zso->zvo_disk->private_data = zv;
snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
ZVOL_DEV_NAME, (dev & MINORMASK));
return (zv);
out_kmem:
kmem_free(zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
return (NULL);
}
/*
* Cleanup then free a zvol_state_t which was created by zvol_alloc().
* At this time, the structure is not opened by anyone, is taken off
* the zvol_state_list, and has its private data set to NULL.
* The zvol_state_lock is dropped.
*
* This function may take many milliseconds to complete (e.g. we've seen
* it take over 256ms), due to the calls to "blk_cleanup_queue" and
* "del_gendisk". Thus, consumers need to be careful to account for this
* latency when calling this function.
*/
void
zvol_os_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
del_gendisk(zv->zv_zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
#if defined(HAVE_BLK_CLEANUP_DISK)
blk_cleanup_disk(zv->zv_zso->zvo_disk);
#else
put_disk(zv->zv_zso->zvo_disk);
#endif
#else
blk_cleanup_queue(zv->zv_zso->zvo_queue);
put_disk(zv->zv_zso->zvo_disk);
#endif
if (zv->zv_zso->use_blk_mq)
blk_mq_free_tag_set(&zv->zv_zso->tag_set);
ida_simple_remove(&zvol_ida,
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
cv_destroy(&zv->zv_removing_cv);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
void
zvol_wait_close(zvol_state_t *zv)
{
}
struct add_disk_work {
struct delayed_work work;
struct gendisk *disk;
int error;
};
static int
__zvol_os_add_disk(struct gendisk *disk)
{
int error = 0;
#ifdef HAVE_ADD_DISK_RET
error = add_disk(disk);
#else
add_disk(disk);
#endif
return (error);
}
#if defined(HAVE_BDEV_FILE_OPEN_BY_PATH)
static void
zvol_os_add_disk_work(struct work_struct *work)
{
struct add_disk_work *add_disk_work;
add_disk_work = container_of(work, struct add_disk_work, work.work);
add_disk_work->error = __zvol_os_add_disk(add_disk_work->disk);
}
#endif
/*
* SPECIAL CASE:
*
* This function basically calls add_disk() from a workqueue. You may be
* thinking: why not just call add_disk() directly?
*
* When you call add_disk(), the zvol appears to the world. When this happens,
* the kernel calls disk_scan_partitions() on the zvol, which behaves
* differently on the 6.9+ kernels:
*
* - 6.8 and older kernels -
* disk_scan_partitions()
* handle = bdev_open_by_dev(
* zvol_open()
* bdev_release(handle);
* zvol_release()
*
*
* - 6.9+ kernels -
* disk_scan_partitions()
* file = bdev_file_open_by_dev()
* zvol_open()
* fput(file)
* < wait for return to userspace >
* zvol_release()
*
* The difference is that the bdev_release() from the 6.8 kernel is synchronous
* while the fput() from the 6.9 kernel is async. More specifically, it is
* async work that has to wait until we return to userspace (since it adds the
* fput into the caller's work queue with the TWA_RESUME flag set). This is not
* the behavior we want, since we want to do things like create+destroy a zvol
* within a single ZFS_IOC_CREATE ioctl, and the "create" part needs to release
* the reference to the zvol while we're in the IOCTL, which can't wait until
* we return to userspace.
*
* We can get around this since fput() has a special codepath for when it's
* running in a kernel thread or interrupt. In those cases, it just puts the
* fput into the system workqueue, which we can force to run with
* __flush_workqueue(). That is why we call add_disk() from a workqueue - so it
* run from a kernel thread and "tricks" the fput() codepaths.
*
* Note that __flush_workqueue() is slowly getting deprecated. This may be ok
* though, since our IOCTL will spin on EBUSY waiting for the zvol release (via
* fput) to happen, and that release will eventually happen naturally from the
* system_wq even without us explicitly calling __flush_workqueue().
*/
static int
zvol_os_add_disk(struct gendisk *disk)
{
#if defined(HAVE_BDEV_FILE_OPEN_BY_PATH) /* 6.9+ kernel */
struct add_disk_work add_disk_work;
INIT_DELAYED_WORK(&add_disk_work.work, zvol_os_add_disk_work);
add_disk_work.disk = disk;
add_disk_work.error = 0;
/* Use *_delayed_work functions since they're not GPL'd */
schedule_delayed_work(&add_disk_work.work, 0);
flush_delayed_work(&add_disk_work.work);
__flush_workqueue(system_wq);
return (add_disk_work.error);
#else /* <= 6.8 kernel */
return (__zvol_os_add_disk(disk));
#endif
}
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
* device is live and ready for use.
*/
int
zvol_os_create_minor(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t len;
unsigned minor = 0;
int error = 0;
int idx;
uint64_t hash = zvol_name_hash(name);
uint64_t volthreading;
bool replayed_zil = B_FALSE;
if (zvol_inhibit_dev)
return (0);
idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
if (idx < 0)
return (SET_ERROR(-idx));
minor = idx << ZVOL_MINOR_BITS;
if (MINOR(minor) != minor) {
/* too many partitions can cause an overflow */
zfs_dbgmsg("zvol: create minor overflow: %s, minor %u/%u",
name, minor, MINOR(minor));
ida_simple_remove(&zvol_ida, idx);
return (SET_ERROR(EINVAL));
}
zv = zvol_find_by_name_hash(name, hash, RW_NONE);
if (zv) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
ida_simple_remove(&zvol_ida, idx);
return (SET_ERROR(EEXIST));
}
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
zv = zvol_alloc(MKDEV(zvol_major, minor), name,
doi->doi_data_block_size);
if (zv == NULL) {
error = SET_ERROR(EAGAIN);
goto out_dmu_objset_disown;
}
zv->zv_hash = hash;
if (dmu_objset_is_snapshot(os))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volsize = volsize;
zv->zv_objset = os;
/* Default */
zv->zv_threading = B_TRUE;
if (dsl_prop_get_integer(name, "volthreading", &volthreading, NULL)
== 0)
zv->zv_threading = volthreading;
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
#ifdef QUEUE_FLAG_DISCARD
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_NONROT
blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
replayed_zil = zil_destroy(zv->zv_zilog, B_FALSE);
else
replayed_zil = zil_replay(os, zv, zvol_replay_vector);
}
if (replayed_zil)
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
/*
* When udev detects the addition of the device it will immediately
* invoke blkid(8) to determine the type of content on the device.
* Prefetching the blocks commonly scanned by blkid(8) will speed
* up this process.
*/
len = MIN(zvol_prefetch_bytes, SPA_MAXBLOCKSIZE);
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
ZIO_PRIORITY_SYNC_READ);
}
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
/*
* Keep in mind that once add_disk() is called, the zvol is
* announced to the world, and zvol_open()/zvol_release() can
* be called at any time. Incidentally, add_disk() itself calls
* zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
* directly as well.
*/
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
rw_exit(&zvol_state_lock);
error = zvol_os_add_disk(zv->zv_zso->zvo_disk);
} else {
ida_simple_remove(&zvol_ida, idx);
}
return (error);
}
void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
/* move to new hashtable entry */
zv->zv_hash = zvol_name_hash(newname);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
/*
* The block device's read-only state is briefly changed causing
* a KOBJ_CHANGE uevent to be issued. This ensures udev detects
* the name change and fixes the symlinks. This does not change
* ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
* changes. This would normally be done using kobject_uevent() but
* that is a GPL-only symbol which is why we need this workaround.
*/
set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
set_disk_ro(zv->zv_zso->zvo_disk, readonly);
dataset_kstats_rename(&zv->zv_kstat, newname);
}
void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{
set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{
set_capacity(zv->zv_zso->zvo_disk, capacity);
}
int
zvol_init(void)
{
int error;
/*
* zvol_threads is the module param the user passes in.
*
* zvol_actual_threads is what we use internally, since the user can
* pass zvol_threads = 0 to mean "use all the CPUs" (the default).
*/
static unsigned int zvol_actual_threads;
if (zvol_threads == 0) {
/*
* See dde9380a1 for why 32 was chosen here. This should
* probably be refined to be some multiple of the number
* of CPUs.
*/
zvol_actual_threads = MAX(num_online_cpus(), 32);
} else {
zvol_actual_threads = MIN(MAX(zvol_threads, 1), 1024);
}
/*
* Use at least 32 zvol_threads, but on many-core systems
* prefer about 6 threads per taskq, while creating no more
* taskqs than there are threads in each of them.
*
* taskq total
* cpus taskqs threads threads
* ------- ------- ------- -------
* 1 1 32 32
* 2 1 32 32
* 4 1 32 32
* 8 2 16 32
* 16 3 11 33
* 32 5 7 35
* 64 8 8 64
* 128 11 12 132
* 256 16 16 256
*/
zv_taskq_t *ztqs = &zvol_taskqs;
uint_t num_tqs = MIN(num_online_cpus(), zvol_num_taskqs);
if (num_tqs == 0) {
num_tqs = 1 + num_online_cpus() / 6;
while (num_tqs * num_tqs > zvol_actual_threads)
num_tqs--;
}
uint_t per_tq_thread = zvol_actual_threads / num_tqs;
if (per_tq_thread * num_tqs < zvol_actual_threads)
per_tq_thread++;
ztqs->tqs_cnt = num_tqs;
ztqs->tqs_taskq = kmem_alloc(num_tqs * sizeof (taskq_t *), KM_SLEEP);
error = register_blkdev(zvol_major, ZVOL_DRIVER);
if (error) {
kmem_free(ztqs->tqs_taskq, ztqs->tqs_cnt * sizeof (taskq_t *));
ztqs->tqs_taskq = NULL;
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
return (error);
}
if (zvol_blk_mq_queue_depth == 0) {
zvol_actual_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
} else {
zvol_actual_blk_mq_queue_depth =
MAX(zvol_blk_mq_queue_depth, BLKDEV_MIN_RQ);
}
if (zvol_blk_mq_threads == 0) {
zvol_blk_mq_actual_threads = num_online_cpus();
} else {
zvol_blk_mq_actual_threads = MIN(MAX(zvol_blk_mq_threads, 1),
1024);
}
for (uint_t i = 0; i < num_tqs; i++) {
char name[32];
(void) snprintf(name, sizeof (name), "%s_tq-%u",
ZVOL_DRIVER, i);
ztqs->tqs_taskq[i] = taskq_create(name, per_tq_thread,
maxclsyspri, per_tq_thread, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (ztqs->tqs_taskq[i] == NULL) {
for (int j = i - 1; j >= 0; j--)
taskq_destroy(ztqs->tqs_taskq[j]);
unregister_blkdev(zvol_major, ZVOL_DRIVER);
kmem_free(ztqs->tqs_taskq, ztqs->tqs_cnt *
sizeof (taskq_t *));
ztqs->tqs_taskq = NULL;
return (-ENOMEM);
}
}
zvol_init_impl();
ida_init(&zvol_ida);
return (0);
}
void
zvol_fini(void)
{
zv_taskq_t *ztqs = &zvol_taskqs;
zvol_fini_impl();
unregister_blkdev(zvol_major, ZVOL_DRIVER);
if (ztqs->tqs_taskq == NULL) {
ASSERT3U(ztqs->tqs_cnt, ==, 0);
} else {
for (uint_t i = 0; i < ztqs->tqs_cnt; i++) {
ASSERT3P(ztqs->tqs_taskq[i], !=, NULL);
taskq_destroy(ztqs->tqs_taskq[i]);
}
kmem_free(ztqs->tqs_taskq, ztqs->tqs_cnt *
sizeof (taskq_t *));
ztqs->tqs_taskq = NULL;
}
ida_destroy(&zvol_ida);
}
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads to handle I/O requests. Set"
" to 0 to use all active CPUs");
module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
module_param(zvol_num_taskqs, uint, 0444);
MODULE_PARM_DESC(zvol_num_taskqs, "Number of zvol taskqs");
module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
module_param(zvol_blk_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");
module_param(zvol_use_blk_mq, uint, 0644);
MODULE_PARM_DESC(zvol_use_blk_mq, "Use the blk-mq API for zvols");
module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
"Process volblocksize blocks per thread");
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
module_param(zvol_open_timeout_ms, uint, 0644);
MODULE_PARM_DESC(zvol_open_timeout_ms, "Timeout for ZVOL open retries");
#endif
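/*
* Editorial usage note (paths assumed, typical Linux deployment): these
* parameters are normally set at module load time, e.g.
*
*	modprobe zfs zvol_threads=16 zvol_use_blk_mq=1
*
* and the 0644 ones can usually also be changed at runtime through
* /sys/module/zfs/parameters/<name>.
*/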
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index bd6f076dfbbd..1f653d953113 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,11096 +1,11097 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, 2024, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
* Copyright (c) 2021, 2024 by George Melikov. All rights reserved.
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* metadata limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed. For example, when using the ZPL each dentry
* holds a reference on a znode. These dentries must be pruned before
* the arc buffer holding the znode can be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
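/*
* Editorial sketch (assumed shape, not actual code) of the lock-ordering rule
* above: while holding an ARC list (sublist) lock, the hash lock may only be
* taken with mutex_tryenter(), and on failure the header is skipped:
*
*	if (!mutex_tryenter(hash_lock)) {
*		ARCSTAT_BUMP(arcstat_mutex_miss);
*		continue;	// skip this header, try the next one
*	}
*/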
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_t's data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list, however a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will memcpy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled that the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static arc_buf_hdr_t **arc_state_evict_markers;
static int arc_state_evict_marker_count;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
static clock_t arc_last_uncached_flush;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
static uint_t zfs_arc_eviction_pct = 200;
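/*
* Worked example (editorial illustration): at the default 200%, an
* allocation of 64KB made while the ARC is overflowing waits until
* 64KB * 200% = 128KB has been evicted on its behalf before proceeding.
*/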
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
static uint_t zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
uint_t arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
static const int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
static int zfs_arc_overflow_shift = 8;
/* log2(fraction of arc to reclaim) */
uint_t arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
uint_t arc_no_grow_shift = 5;
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static uint_t arc_min_prefetch_ms;
static uint_t arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
uint_t arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
uint64_t zfs_arc_max = 0;
uint64_t zfs_arc_min = 0;
static uint64_t zfs_arc_dnode_limit = 0;
static uint_t zfs_arc_dnode_reduce_percent = 10;
static uint_t zfs_arc_grow_retry = 0;
static uint_t zfs_arc_shrink_shift = 0;
uint_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle:
* * total dirty data limit
* * anon block dirty limit
* * each pool's anon allowance
*/
static const unsigned long zfs_arc_dirty_limit_percent = 50;
static const unsigned long zfs_arc_anon_limit_percent = 25;
static const unsigned long zfs_arc_pool_dirty_percent = 20;
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* Balance between metadata and data on ghost hits. Values above 100
* increase metadata caching by proportionally reducing effect of ghost
* data hits on target data/metadata rate.
*/
static uint_t zfs_arc_meta_balance = 500;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
static uint_t zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux-specific
*/
static uint64_t zfs_arc_sys_free = 0;
static uint_t zfs_arc_min_prefetch_ms = 0;
static uint_t zfs_arc_min_prescient_prefetch_ms = 0;
static uint_t zfs_arc_lotsfree_percent = 10;
/*
* Number of arc_prune threads
*/
static int zfs_arc_prune_task_threads = 1;
/* Used by spa_export/spa_destroy to flush the arc asynchronously */
static taskq_t *arc_flush_taskq;
/* The 7 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_state_t ARC_uncached;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "iohits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_iohits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_iohits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_iohits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_iohits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "uncached_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "meta", KSTAT_DATA_UINT64 },
{ "pd", KSTAT_DATA_UINT64 },
{ "pm", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_data", KSTAT_DATA_UINT64 },
{ "anon_metadata", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_data", KSTAT_DATA_UINT64 },
{ "mru_metadata", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_data", KSTAT_DATA_UINT64 },
{ "mfu_metadata", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "uncached_size", KSTAT_DATA_UINT64 },
{ "uncached_data", KSTAT_DATA_UINT64 },
{ "uncached_metadata", KSTAT_DATA_UINT64 },
{ "uncached_evictable_data", KSTAT_DATA_UINT64 },
{ "uncached_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_iohit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "prescient_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "demand_iohit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
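/*
* Illustrative expansion (editorial, hypothetical arguments): a call such as
*
*	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
*	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
*
* bumps exactly one of arcstat_demand_data_hits, arcstat_demand_metadata_hits,
* arcstat_prefetch_data_hits or arcstat_prefetch_metadata_hits, matching the
* four "hits" kstats declared above.
*/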
/*
* This macro allows us to use kstats as floating averages. Each time we
* update this kstat, we first factor it and the update value by
* ARCSTAT_AVG_FACTOR to shrink the new value's contribution to the overall
* average. This macro assumes that integer loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
} while (0)
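/*
* Worked example (editorial illustration): with ARCSTAT_F_AVG_FACTOR = 3,
* an existing average of 900 and a new sample of 300 update as
* x = 900 - 900/3 + 300/3 = 900 - 300 + 100 = 700, i.e. the stored value
* moves one third of the way towards each new sample.
*/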
static kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_dnode_limit ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_UNCACHED(hdr) ((hdr)->b_flags & ARC_FLAG_UNCACHED)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Asynchronous ARC flush
*
* We track these in a list for arc_async_flush_guid_inuse().
* Used for both L1 and L2 async teardown.
*/
static list_t arc_async_flush_list;
static kmutex_t arc_async_flush_lock;
typedef struct arc_async_flush {
uint64_t af_spa_guid;
taskq_ent_t af_tqent;
uint_t af_cache_level; /* 1 or 2 to differentiate node */
list_node_t af_node;
} arc_async_flush_t;
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (32 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 8 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
* and each of the states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
uint64_t l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
static uint_t l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_USE_RESERVE = 0x4,
ARC_HDR_ALLOC_LINEAR = 0x8,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, const void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, const void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, const void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, const void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, const void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size,
const void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_hdr_destroy(arc_buf_hdr_t *);
static void arc_access(arc_buf_hdr_t *, arc_flags_t, boolean_t);
static void arc_buf_watch(arc_buf_t *);
static void arc_change_state(arc_state_t *, arc_buf_hdr_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
static void arc_prune_async(uint64_t adjust);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
* l2arc_exclude_special : A ZFS module parameter that controls whether buffers
* present on special vdevs are eligible for caching in L2ARC. If
* set to 1, exclude dbufs on special vdevs from being cached to
* L2ARC.
*/
int l2arc_exclude_special = 0;
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
static int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
* will vary depending on how well the specific device handles
* these commands.
*/
static uint64_t l2arc_trim_ahead = 0;
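/*
* Illustrative example only: with the default l2arc_write_max of 32 MB,
* setting l2arc_trim_ahead to 100 would TRIM roughly twice the space
* required for the next write, i.e. about 64 MB ahead of the write hand,
* which coincides with the 64 MB minimum described above.
*/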
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case do not write log blocks in L2ARC in order
* not to waste space.
*/
static int l2arc_rebuild_enabled = B_TRUE;
static uint64_t l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static __attribute__((noreturn)) void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
static uint64_t l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
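/*
* The 64-bit value computed above is reduced to a bucket index elsewhere
* (see BUF_HASH_INDEX(), defined earlier in this file, which masks it with
* buf_hash_table.ht_mask); the same index selects the per-bucket lock via
* BUF_HASH_LOCK().
*/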
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_GET_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
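/*
* Typical caller pattern, as a sketch only (see arc_read() for a real
* caller; "guid" here stands for the pool's load guid):
*
*	kmutex_t *hash_lock;
*	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
*	if (hdr != NULL) {
*		... operate on hdr while the bucket lock is held ...
*		mutex_exit(hash_lock);
*	}
*
* On a miss the bucket lock has already been dropped and *lockp is NULL.
*/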
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
ARCSTAT_BUMP(arcstat_hash_elements);
return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
ARCSTAT_BUMPDOWN(arcstat_hash_elements);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (int i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
#ifdef ZFS_DEBUG
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
#endif
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
memset(buf, 0, sizeof (arc_buf_t));
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
static void
hdr_full_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
#ifdef ZFS_DEBUG
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
#endif
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
static void
buf_dest(void *vbuf, void *unused)
{
(void) unused;
(void) vbuf;
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
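/*
* Worked example, assuming the default 8K average block size: on a system
* with 64 GB of memory the loop below settles on 64 GB / 8 KB = 8M
* buckets, i.e. an 8M * 8-byte = 64 MB table, always rounded up to a
* power of two so that ht_mask can be a simple bitmask.
*/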
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, KMC_RECLAIMABLE);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
EQUIV(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
#ifdef ZFS_DEBUG
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
#ifdef ZFS_DEBUG
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
#ifdef ZFS_DEBUG
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
(void) sig, (void) unused;
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#else
(void) buf;
#endif
}
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#else
(void) buf;
#endif
}
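/*
* Note on the userland arc_watch debug facility: when arc_watch is
* enabled, arc_buf_watch() maps a frozen buffer read-only with mprotect()
* and arc_buf_unwatch() restores write access before the buffer is thawed
* or freed. Any stray store into a watched buffer then faults and is
* reported by the arc_buf_sigsegv() handler above.
*/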
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in a
* thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Holes and embedded blocks will always have a psize = 0, so
* we ignore the compression of the blkptr and mark them
* as uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
#ifdef ZFS_DEBUG
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
#endif
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t asize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
ASSERT(dev->l2ad_vdev != NULL);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
HDR_SET_L2SIZE(hdr, asize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
abd_t *abd = hdr->b_l1hdr.b_pabd;
boolean_t free_abd = B_FALSE;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(abd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd = NULL;
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, &abd, lsize, MIN(lsize, psize),
hdr->b_complevel);
if (csize >= lsize || csize > psize) {
ret = SET_ERROR(EIO);
return (ret);
}
ASSERT3P(abd, !=, NULL);
abd_zero_off(abd, csize, psize - csize);
free_abd = B_TRUE;
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret == ENOENT)
ret = 0;
if (free_abd)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, 0);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, 0);
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, cabd, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
goto error;
}
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
* abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3PF(hdr->b_l1hdr.b_pabd, !=, NULL, "hdr %px buf %px", hdr, buf);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !arc_buf_is_shared(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (ARC_BUF_SHARED(buf)) {
ASSERT(arc_buf_is_shared(buf));
} else {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (ARC_BUF_SHARED(buf)) {
ASSERTF(ARC_BUF_COMPRESSED(buf),
"buf %p was uncompressed", buf);
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(!arc_buf_is_shared(buf));
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
abd_t dabd;
abd_get_from_buf_struct(&dabd, buf->b_data,
HDR_GET_LSIZE(hdr));
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, &dabd,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
abd_free(&dabd);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb, buf->b_hdr->b_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
state != arc_anon && state != arc_l2c_only) {
/* We don't use the L2-only state list. */
multilist_remove(&state->arcs_list[arc_buf_type(hdr)], hdr);
arc_evictable_space_decrement(hdr, state);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, we add this hdr to the arc_state_t's
* list, making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, const void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(!GHOST_STATE(state)); /* arc_l2c_only counts as a ghost. */
if ((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) != 0)
return (cnt);
if (state == arc_anon) {
arc_hdr_destroy(hdr);
return (0);
}
if (state == arc_uncached && !HDR_PREFETCH(hdr)) {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
return (0);
}
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
arc_evictable_space_increment(hdr, state);
return (0);
}
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal,
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
(void) state_index;
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = 0;
for (arc_buf_t *buf = l1hdr->b_buf; buf; buf = buf->b_next)
abi->abi_bufcnt++;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
{
arc_state_t *old_state;
int64_t refcnt;
boolean_t update_old, update_new;
arc_buf_contents_t type = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
update_old = (hdr->b_l1hdr.b_buf != NULL ||
hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(GHOST_STATE(old_state), hdr->b_l1hdr.b_buf == NULL);
IMPLY(GHOST_STATE(new_state), hdr->b_l1hdr.b_buf == NULL);
IMPLY(old_state == arc_anon, hdr->b_l1hdr.b_buf == NULL ||
ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
} else {
old_state = arc_l2c_only;
refcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
if (GHOST_STATE(old_state))
update_old = B_TRUE;
if (GHOST_STATE(new_state))
update_new = B_TRUE;
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(new_state, !=, old_state);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
/* remove_reference() saves on insert. */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
multilist_remove(&old_state->arcs_list[type],
hdr);
arc_evictable_space_decrement(hdr, old_state);
}
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[type], hdr);
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have no arc
* buffer to use for the reference. As a result, we
* use the arc header pointer for the reference.
*/
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
arc_buf_size(buf), buf);
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
} else {
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
arc_buf_size(buf), buf);
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABDs, not
* just those allocated by the ARC. But the vast majority of
* scatter ABDs come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
ARCSTAT_INCR(arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
ARCSTAT_INCR(arcstat_meta_used, -space);
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee; however, it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
const void *tag, boolean_t encrypted, boolean_t compressed,
boolean_t noauth, boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static const char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as
* in-flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
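/*
* Illustrative lifecycle of a loaned buffer (a sketch, not authoritative):
* a consumer such as the DMU borrows an anonymous buffer with
* arc_loan_buf(), fills it, and hands it over with arc_return_buf(), which
* moves the hold from the "onloan" tag to the new owner and decreases
* arc_loaned_bytes. The reverse operation, arc_loan_inuse_buf(), detaches
* a buffer from its current owner (e.g. a dbuf) and places it back on
* loan.
*/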
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size[type], size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(
&hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
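/*
* Stop sharing the arc_buf_t's data with the hdr. Refcount ownership is
* transferred back to the buf and the hdr's ABD wrapper is freed.
*/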
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(
&hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list then update lastbuf and advance
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (ARC_BUF_SHARED(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
ASSERT(!arc_buf_is_shared(buf));
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
/*
* If we have no more encrypted buffers and we've already
* gotten a copy of the decrypted data we can free b_rabd
* to save some space.
*/
if (ARC_BUF_ENCRYPTED(buf) && HDR_HAS_RABD(hdr) &&
hdr->b_l1hdr.b_pabd != NULL && !HDR_IO_IN_PROGRESS(hdr)) {
arc_buf_t *b;
for (b = hdr->b_l1hdr.b_buf; b; b = b->b_next) {
if (b != buf && ARC_BUF_ENCRYPTED(b))
break;
}
if (b == NULL)
arc_hdr_free_abd(hdr, B_TRUE);
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
ASSERT(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
ASSERT(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must setup a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
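/*
* Allocate the header's physical (b_pabd) or, when ARC_HDR_ALLOC_RDATA is
* set, raw (b_rabd) ABD and update the corresponding kstats.
*/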
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
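/*
* Free the header's physical (b_pabd) or raw (b_rabd) ABD. If the header
* is currently being written to the L2ARC, the free is deferred via
* arc_hdr_free_on_write().
*/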
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
/*
* Allocate empty anonymous ARC header. The header will get its identity
* assigned and buffers attached later as part of read or write operations.
*
* In case of read arc_read() assigns the header its identity (b_dva + b_birth),
* inserts it into ARC hash to become globally visible and allocates physical
* (b_pabd) or raw (b_rabd) ABD buffer to read into from disk. On disk read
* completion arc_read_done() allocates ARC buffer(s) as needed, potentially
* sharing one of them with the physical ABD buffer.
*
* In case of write arc_alloc_buf() allocates ARC buffer to be filled with
* data. Then after compression and/or encryption arc_write_ready() allocates
* and fills (or potentially shares) physical (b_pabd) or raw (b_rabd) ABD
* buffer. On disk write completion arc_write_done() assigns the header its
* new identity (b_dva + b_birth) and inserts into ARC hash.
*
* In case of partial overwrite the old data is read first as described. Then
* arc_release() either allocates new anonymous ARC header and moves the ARC
* buffer to it, or reuses the old ARC header by discarding its identity and
* removing it from ARC hash. After buffer modification normal write process
* follows as described.
*/
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
ASSERT(HDR_EMPTY(hdr));
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_buf = NULL;
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
memcpy(nhdr, hdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(); as such, we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, other code might try to access the
* b_l1hdr.b_pabd field even though it has been removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, const void *tag, arc_buf_contents_t type,
int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, const void *tag, uint64_t psize,
uint64_t lsize, enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to disk,
* it's easiest if we just set up sharing between the buf and the hdr.
*/
arc_share_buf(hdr, buf);
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, const void *tag, uint64_t dsobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
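/*
* Update the L2ARC kstats for a header being added to (incr == B_TRUE) or
* removed from (incr == B_FALSE) the L2ARC. When state_only is set, only
* the prefetch/ARC-state asize statistics are adjusted.
*/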
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = HDR_GET_L2SIZE(hdr);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
/* For L2 we expect the header's b_l2size to be valid */
ASSERT3U(asize, >=, psize);
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
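/*
* Tear down the L2ARC portion of a header: remove it from the device's
* buffer list, return its space to the vdev and drop the l2ad_alloc
* accounting. The caller must hold l2ad_mtx.
*/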
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
if (dev->l2ad_vdev != NULL) {
uint64_t asize = HDR_GET_L2SIZE(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
}
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
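/*
* Destroy an ARC header and free everything still attached to it (checksum,
* buffers, ABDs and any L2ARC portion). If the header has an L1 portion it
* must be anonymous and unreferenced.
*/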
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr)) {
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
arc_hdr_l2hdr_destroy(hdr);
}
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can not
* be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
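/*
* Destroy the given arc buffer and release the reference its consumer (tag)
* holds on the header.
*/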
void
arc_buf_destroy(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
ASSERT(ARC_BUF_LAST(buf));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, tag));
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
arc_buf_destroy_impl(buf);
(void) remove_reference(hdr, tag);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
* - arc_uncached -> deleted
*
* Return total size of evicted data buffers for eviction progress tracking.
* When evicting from ghost states return logical buffer size to make eviction
* progress at the same (or at least comparable) rate as from non-ghost states.
*
* Return *real_evicted for actual ARC size reduction to wake up threads
* waiting for it. For non-ghost states it includes size of evicted data
* buffers (the headers are not freed there). For ghost states it includes
* only the evicted headers size.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
uint_t min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
(void) arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu || state == arc_uncached);
evicted_state = (state == arc_uncached) ? arc_anon :
((state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost);
/* prefetch buffers have a minimum lifespan */
if ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime)) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed buffer then we
* discard it here before we change states. This ensures that the
* accounting is updated correctly in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
if (evicted_state == arc_anon) {
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
} else {
ASSERT(HDR_IN_HASH_TABLE(hdr));
}
return (bytes_evicted);
}
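/*
* Recompute arc_need_free from the current free memory deficit and, if
* there are eviction waiters, the amount of eviction still outstanding for
* the last waiter.
*/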
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
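/*
* Evict up to "bytes" worth of buffers from a single multilist sublist,
* using the given marker header to remember our position in the sublist.
* Returns the number of bytes evicted.
*/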
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint_t evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock_idx(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count == 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero is for the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, &revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
kpreempt(KPREEMPT_SYNC);
return (bytes_evicted);
}
static arc_buf_hdr_t *
arc_state_alloc_marker(void)
{
arc_buf_hdr_t *marker = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_state_impl().
*/
marker->b_spa = 0;
return (marker);
}
static void
arc_state_free_marker(arc_buf_hdr_t *marker)
{
kmem_cache_free(hdr_full_cache, marker);
}
/*
* Allocate an array of buffer headers used as placeholders during arc state
* eviction.
*/
static arc_buf_hdr_t **
arc_state_alloc_markers(int count)
{
arc_buf_hdr_t **markers;
markers = kmem_zalloc(sizeof (*markers) * count, KM_SLEEP);
for (int i = 0; i < count; i++)
markers[i] = arc_state_alloc_marker();
return (markers);
}
static void
arc_state_free_markers(arc_buf_hdr_t **markers, int count)
{
for (int i = 0; i < count; i++)
arc_state_free_marker(markers[i]);
kmem_free(markers, sizeof (*markers) * count);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, arc_buf_contents_t type, uint64_t spa,
uint64_t bytes)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
if (zthr_iscurthread(arc_evict_zthr)) {
markers = arc_state_evict_markers;
ASSERT3S(num_sublists, <=, arc_state_evict_marker_count);
} else {
markers = arc_state_alloc_markers(num_sublists);
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
mls = multilist_sublist_lock_idx(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock_idx(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
}
if (markers != arc_state_evict_markers)
arc_state_free_markers(markers, num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, type, spa, ARC_EVICT_ALL);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified. This
* function prevents us from trying to evict more from a state's list
* than is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, arc_buf_contents_t type, int64_t bytes)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, type, 0, delta));
}
return (0);
}
/*
* Adjust specified fraction, taking into account initial ghost state(s) size,
* ghost hit bytes towards increasing the fraction, ghost hit bytes towards
* decreasing it, plus a balance factor, controlling the decrease rate, used
* to balance metadata vs data.
*/
static uint64_t
arc_evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down,
uint_t balance)
{
if (total < 8 || up + down == 0)
return (frac);
/*
* We should not have more ghost hits than ghost size, but they
* may get close. Restrict maximum adjustment in that case.
*/
if (up + down >= total / 4) {
uint64_t scale = (up + down) / (total / 8);
up /= scale;
down /= scale;
}
/* Get maximal dynamic range by choosing optimal shifts. */
int s = highbit64(total);
s = MIN(64 - s, 32);
uint64_t ofrac = (1ULL << 32) - frac;
if (frac >= 4 * ofrac)
up /= frac / (2 * ofrac + 1);
up = (up << s) / (total >> (32 - s));
if (ofrac >= 4 * frac)
down /= ofrac / (2 * frac + 1);
down = (down << s) / (total >> (32 - s));
down = down * 100 / balance;
return (frac + up - down);
}
/*
* Calculate (x * multiplier / divisor) without unnecessary overflows.
*/
static uint64_t
arc_mf(uint64_t x, uint64_t multiplier, uint64_t divisor)
{
uint64_t q = (x / divisor);
uint64_t r = (x % divisor);
return ((q * multiplier) + ((r * multiplier) / divisor));
}
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t bytes, total_evicted = 0;
int64_t e, mrud, mrum, mfud, mfum, w;
static uint64_t ogrd, ogrm, ogfd, ogfm;
static uint64_t gsrd, gsrm, gsfd, gsfm;
uint64_t ngrd, ngrm, ngfd, ngfm;
/* Get current size of ARC states we can evict from. */
mrud = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]);
mrum = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
mfud = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
mfum = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
uint64_t d = mrud + mfud;
uint64_t m = mrum + mfum;
uint64_t t = d + m;
/* Get ARC ghost hits since last eviction. */
ngrd = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
uint64_t grd = ngrd - ogrd;
ogrd = ngrd;
ngrm = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
uint64_t grm = ngrm - ogrm;
ogrm = ngrm;
ngfd = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
uint64_t gfd = ngfd - ogfd;
ogfd = ngfd;
ngfm = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
uint64_t gfm = ngfm - ogfm;
ogfm = ngfm;
/* Adjust ARC states balance based on ghost hits. */
arc_meta = arc_evict_adj(arc_meta, gsrd + gsrm + gsfd + gsfm,
grm + gfm, grd + gfd, zfs_arc_meta_balance);
arc_pd = arc_evict_adj(arc_pd, gsrd + gsfd, grd, gfd, 100);
arc_pm = arc_evict_adj(arc_pm, gsrm + gsfm, grm, gfm, 100);
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t ac = arc_c;
int64_t wt = t - (asize - ac);
/*
* Try to reduce pinned dnodes if more than 3/4 of the wanted metadata
* target is not evictable, or if the total dnode size goes over
* arc_dnode_limit.
*/
int64_t prune = 0;
int64_t dn = wmsum_value(&arc_sums.arcstat_dnode_size);
int64_t nem = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA])
+ zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA])
- zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA])
- zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
w = wt * (int64_t)(arc_meta >> 16) >> 16;
if (nem > w * 3 / 4) {
prune = dn / sizeof (dnode_t) *
zfs_arc_dnode_reduce_percent / 100;
if (nem < w && w > 4)
prune = arc_mf(prune, nem - w * 3 / 4, w / 4);
}
if (dn > arc_dnode_limit) {
prune = MAX(prune, (dn - arc_dnode_limit) / sizeof (dnode_t) *
zfs_arc_dnode_reduce_percent / 100);
}
if (prune > 0)
arc_prune_async(prune);
/* Evict MRU metadata. */
w = wt * (int64_t)(arc_meta * arc_pm >> 48) >> 16;
e = MIN((int64_t)(asize - ac), (int64_t)(mrum - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_METADATA, e);
total_evicted += bytes;
mrum -= bytes;
asize -= bytes;
/* Evict MFU metadata. */
w = wt * (int64_t)(arc_meta >> 16) >> 16;
e = MIN((int64_t)(asize - ac), (int64_t)(m - bytes - w));
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_METADATA, e);
total_evicted += bytes;
mfum -= bytes;
asize -= bytes;
/* Evict MRU data. */
wt -= m - total_evicted;
w = wt * (int64_t)(arc_pd >> 16) >> 16;
e = MIN((int64_t)(asize - ac), (int64_t)(mrud - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_DATA, e);
total_evicted += bytes;
mrud -= bytes;
asize -= bytes;
/* Evict MFU data. */
e = asize - ac;
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_DATA, e);
mfud -= bytes;
total_evicted += bytes;
/*
* Evict ghost lists
*
* Size of each state's ghost list represents how much that state
* may grow by shrinking the other states. Were it to shrink the
* other states to zero (which is unlikely), its ghost size would be
* equal to the sum of the other three state sizes. But excessive ghost
* size may result in false ghost hits (too far back) that may
* never result in real cache hits if several states are competing.
* So choose some arbitrary point of 1/2 of the other state sizes.
*/
gsrd = (mrum + mfud + mfum) / 2;
e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]) -
gsrd;
(void) arc_evict_impl(arc_mru_ghost, ARC_BUFC_DATA, e);
gsrm = (mrud + mfud + mfum) / 2;
e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]) -
gsrm;
(void) arc_evict_impl(arc_mru_ghost, ARC_BUFC_METADATA, e);
gsfd = (mrud + mrum + mfum) / 2;
e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]) -
gsfd;
(void) arc_evict_impl(arc_mfu_ghost, ARC_BUFC_DATA, e);
gsfm = (mrud + mrum + mfud) / 2;
e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]) -
gsfm;
(void) arc_evict_impl(arc_mfu_ghost, ARC_BUFC_METADATA, e);
return (total_evicted);
}
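/*
* Flush all evictable buffers, optionally limited to a single spa load
* guid (guid == 0 means all), from every ARC state.
*/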
static void
arc_flush_impl(uint64_t guid, boolean_t retry)
{
ASSERT(!retry || guid == 0);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_uncached, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_uncached, guid, ARC_BUFC_METADATA, retry);
}
void
arc_flush(spa_t *spa, boolean_t retry)
{
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == NULL);
arc_flush_impl(spa != NULL ? spa_load_guid(spa) : 0, retry);
}
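/*
* Allocate an async flush tracking entry for the given spa load guid and
* cache level and add it to the global list.
*/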
static arc_async_flush_t *
arc_async_flush_add(uint64_t spa_guid, uint_t level)
{
arc_async_flush_t *af = kmem_alloc(sizeof (*af), KM_SLEEP);
af->af_spa_guid = spa_guid;
af->af_cache_level = level;
taskq_init_ent(&af->af_tqent);
list_link_init(&af->af_node);
mutex_enter(&arc_async_flush_lock);
list_insert_tail(&arc_async_flush_list, af);
mutex_exit(&arc_async_flush_lock);
return (af);
}
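/*
* Remove and free the async flush tracking entry matching the given spa
* load guid and cache level, if one is present.
*/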
static void
arc_async_flush_remove(uint64_t spa_guid, uint_t level)
{
mutex_enter(&arc_async_flush_lock);
for (arc_async_flush_t *af = list_head(&arc_async_flush_list);
af != NULL; af = list_next(&arc_async_flush_list, af)) {
if (af->af_spa_guid == spa_guid &&
af->af_cache_level == level) {
list_remove(&arc_async_flush_list, af);
kmem_free(af, sizeof (*af));
break;
}
}
mutex_exit(&arc_async_flush_lock);
}
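/*
* Taskq callback for arc_flush_async(): flush the spa's buffers from the
* ARC, drop the tracking entry and log how long the flush took.
*/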
static void
arc_flush_task(void *arg)
{
arc_async_flush_t *af = arg;
hrtime_t start_time = gethrtime();
uint64_t spa_guid = af->af_spa_guid;
arc_flush_impl(spa_guid, B_FALSE);
arc_async_flush_remove(spa_guid, af->af_cache_level);
uint64_t elapsed = NSEC2MSEC(gethrtime() - start_time);
if (elapsed > 0) {
zfs_dbgmsg("spa %llu arc flushed in %llu ms",
(u_longlong_t)spa_guid, (u_longlong_t)elapsed);
}
}
/*
* ARC buffers use the spa's load guid and can continue to exist after
* the spa_t is gone (exported). The blocks are orphaned since each
* spa import has a different load guid.
*
* It's OK if the spa is re-imported while this asynchronous flush is
* still in progress. The new spa_load_guid will be different.
*
* Also, arc_fini will wait for any arc_flush_task to finish.
*/
void
arc_flush_async(spa_t *spa)
{
uint64_t spa_guid = spa_load_guid(spa);
arc_async_flush_t *af = arc_async_flush_add(spa_guid, 1);
taskq_dispatch_ent(arc_flush_taskq, arc_flush_task,
af, TQ_SLEEP, &af->af_tqent);
}
/*
* Check if a guid is still in-use as part of an async teardown task
*/
boolean_t
arc_async_flush_guid_inuse(uint64_t spa_guid)
{
mutex_enter(&arc_async_flush_lock);
for (arc_async_flush_t *af = list_head(&arc_async_flush_list);
af != NULL; af = list_next(&arc_async_flush_list, af)) {
if (af->af_spa_guid == spa_guid) {
mutex_exit(&arc_async_flush_lock);
return (B_TRUE);
}
}
mutex_exit(&arc_async_flush_lock);
return (B_FALSE);
}
uint64_t
arc_reduce_target_size(uint64_t to_free)
{
/*
* Get the actual arc size. Even if we don't need it, this updates
* the aggsum lower bound estimate for arc_is_overflowing().
*/
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
uint64_t c = arc_c;
if (c > arc_c_min) {
c = MIN(c, MAX(asize, arc_c_min));
to_free = MIN(to_free, c - arc_c_min);
arc_c = c - to_free;
} else {
to_free = 0;
}
/*
* Whether or not we reduced the target size, request eviction if the
* current size is over it now, since caller obviously wants some RAM.
*/
if (asize > arc_c) {
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
return (to_free);
}
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
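/*
* Reap unused memory from the kmem caches used by the ARC (zio buffers,
* headers, the btree leaf cache and the ABD cache).
*/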
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
#ifdef _KERNEL
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently, but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
if (arc_evict_needed)
return (B_TRUE);
/*
* If we have buffers in uncached state, evict them periodically.
*/
return ((zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]) &&
ddi_get_lbolt() - arc_last_uncached_flush >
MSEC_TO_TICK(arc_min_prefetch_ms / 2)));
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
(void) arg;
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Always try to evict from uncached state. */
arc_last_uncached_flush = ddi_get_lbolt();
evicted += arc_flush_state(arc_uncached, 0, ARC_BUFC_DATA, B_FALSE);
evicted += arc_flush_state(arc_uncached, 0, ARC_BUFC_METADATA, B_FALSE);
/* Evict from other states only if told to. */
if (arc_evict_needed)
evicted += arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*
* Note we check for cancellation using zthr instead of arc_evict_zthr
* because the latter may not yet be initialized when the
* callback is first invoked.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called unconditionally every 60 seconds to reclaim unused
* zstd compression and decompression context. This is done
* here to avoid the need for an independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
int64_t can_free, free_memory, to_free;
(void) arg, (void) zthr;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
can_free = arc_c - arc_c_min;
to_free = (MAX(can_free, 0) >> arc_shrink_shift) - free_memory;
if (to_free > 0)
arc_reduce_target_size(to_free);
spl_fstrans_unmark(cookie);
}
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal to arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
#endif /* _KERNEL */
/*
* Adapt the ARC target size given the number of bytes we are trying to
* add. This function is only called when we are adding new content to
* the cache.
*/
static void
arc_adapt(uint64_t bytes)
{
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
if (aggsum_upper_bound(&arc_sums.arcstat_size) +
2 * SPA_MAXBLOCKSIZE >= arc_c) {
uint64_t dc = MAX(bytes, SPA_OLD_MAXBLOCKSIZE);
if (atomic_add_64_nv(&arc_c, dc) > arc_c_max)
arc_c = arc_c_max;
}
}
/*
* Check if ARC current size has grown past our upper thresholds.
*/
static arc_ovf_level_t
arc_is_overflowing(boolean_t lax, boolean_t use_reserve)
{
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) - arc_c -
zfs_max_recordsize;
/* Always allow at least one block of overflow. */
if (over < 0)
return (ARC_OVF_NONE);
/* If we are under memory pressure, report severe overflow. */
if (!lax)
return (ARC_OVF_SEVERE);
/* We are not under pressure, so be more or less relaxed. */
int64_t overflow = (arc_c >> zfs_arc_overflow_shift) / 2;
if (use_reserve)
overflow *= 3;
return (over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
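/*
* Allocate an ABD for a header's data. arc_get_data_impl() performs the
* ARC accounting (and may wait for eviction) before the ABD is allocated.
*/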
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, alloc_flags);
if (alloc_flags & ARC_HDR_ALLOC_LINEAR)
return (abd_alloc_linear(size, type == ARC_BUFC_METADATA));
else
return (abd_alloc(size, type == ARC_BUFC_METADATA));
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, 0);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system.
 * The lax argument specifies that the caller does not have a specific
 * reason to wait and is not aware of any memory pressure.  Low memory
 * handlers, however, should set it to B_FALSE to wait for all required
 * evictions to complete.  The use_reserve argument allows some callers
 * to wait less than others, so as not to block critical code paths that
 * may in turn block other resources.
*/
void
arc_wait_for_eviction(uint64_t amount, boolean_t lax, boolean_t use_reserve)
{
switch (arc_is_overflowing(lax, use_reserve)) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
 * This is a bit racy without taking arc_evict_lock, but the
 * worst that can happen is that we either call zthr_wakeup() an
 * extra time due to a race with another thread here, or that the
 * flag we set gets cleared by arc_evict_cb(), which is unlikely
 * given the big hysteresis, and also unimportant since at this
 * level of overflow the eviction is purely advisory.  At the same
 * time, taking the global lock here every time without waiting
 * for the actual eviction would create significant lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_adapt(size);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
 * further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, to
 * ensure that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
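	/*
	 * For example, assuming the default zfs_arc_eviction_pct of 200,
	 * a 128 KiB request below waits for 256 KiB worth of eviction
	 * progress before proceeding.
	 */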
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
B_TRUE, alloc_flags & ARC_HDR_USE_RESERVE);
arc_buf_contents_t type = arc_buf_type(hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size[type], size,
tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
}
}
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size,
const void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size[type], size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
*/
static void
arc_access(arc_buf_hdr_t *hdr, arc_flags_t arc_flags, boolean_t hit)
{
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Update buffer prefetch status.
*/
boolean_t was_prefetch = HDR_PREFETCH(hdr);
boolean_t now_prefetch = arc_flags & ARC_FLAG_PREFETCH;
if (was_prefetch != now_prefetch) {
if (was_prefetch) {
ARCSTAT_CONDSTAT(hit, demand_hit, demand_iohit,
HDR_PRESCIENT_PREFETCH(hdr), prescient, predictive,
prefetch);
}
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
if (was_prefetch) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH | ARC_FLAG_PRESCIENT_PREFETCH);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
}
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (now_prefetch) {
if (arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
ARCSTAT_BUMP(arcstat_prescient_prefetch);
} else {
ARCSTAT_BUMP(arcstat_predictive_prefetch);
}
}
if (arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
clock_t now = ddi_get_lbolt();
if (hdr->b_l1hdr.b_state == arc_anon) {
arc_state_t *new_state;
/*
* This buffer is not in the cache, and does not appear in
* our "ghost" lists. Add it to the MRU or uncached state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = now;
if (HDR_UNCACHED(hdr)) {
new_state = arc_uncached;
DTRACE_PROBE1(new_state__uncached, arc_buf_hdr_t *,
hdr);
} else {
new_state = arc_mru;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
}
arc_change_state(new_state, hdr);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
/*
* This buffer has been accessed once recently and either
* its read is still in progress or it is in the cache.
*/
if (HDR_IO_IN_PROGRESS(hdr)) {
hdr->b_l1hdr.b_arc_access = now;
return;
}
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
/*
* If the previous access was a prefetch, then it already
* handled possible promotion, so nothing more to do for now.
*/
if (was_prefetch) {
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* If more than ARC_MINTIME have passed from the previous
* hit, promote the buffer to the MFU state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr);
}
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been accessed once recently, but was
 * evicted from the cache.  Had the MRU been bigger, this
 * would have been an MRU hit, so handle it the same way, except
 * that we don't need to check the previous access time.
*/
hdr->b_l1hdr.b_mru_ghost_hits++;
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
hdr->b_l1hdr.b_arc_access = now;
wmsum_add(&arc_mru_ghost->arcs_hits[arc_buf_type(hdr)],
arc_hdr_size(hdr));
if (was_prefetch) {
new_state = arc_mru;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
arc_change_state(new_state, hdr);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
 * This buffer has been accessed more than once and is either
 * still in the cache or being restored from one of the ghost lists.
*/
if (!HDR_IO_IN_PROGRESS(hdr)) {
hdr->b_l1hdr.b_mfu_hits++;
ARCSTAT_BUMP(arcstat_mfu_hits);
}
hdr->b_l1hdr.b_arc_access = now;
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
/*
* This buffer has been accessed more than once recently, but
 * has been evicted from the cache.  Had the MFU been bigger,
 * it would have stayed in the cache, so move it back to the MFU state.
*/
hdr->b_l1hdr.b_mfu_ghost_hits++;
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
hdr->b_l1hdr.b_arc_access = now;
wmsum_add(&arc_mfu_ghost->arcs_hits[arc_buf_type(hdr)],
arc_hdr_size(hdr));
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr);
} else if (hdr->b_l1hdr.b_state == arc_uncached) {
/*
* This buffer is uncacheable, but we got a hit. Probably
* a demand read after prefetch. Nothing more to do here.
*/
if (!HDR_IO_IN_PROGRESS(hdr))
ARCSTAT_BUMP(arcstat_uncached_hits);
hdr->b_l1hdr.b_arc_access = now;
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC and was not accessed
 * for a long time, so treat it as new and put it into the MRU.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr))
return;
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu ||
hdr->b_l1hdr.b_state == arc_uncached);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, 0, B_TRUE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(B_TRUE /* demand */, demand, prefetch,
!HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zio, (void) zb, (void) bp;
if (buf == NULL)
return;
memcpy(arg, buf->b_data, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zb, (void) bp;
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
/*
 * The hdr was inserted into the hash table and removed from the
 * lists prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if we were freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_GET_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (zio->io_error == 0) {
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp,
hdr->b_crypt_hdr.b_mac);
}
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
hdr->b_l1hdr.b_acb = NULL;
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
 /* Remember the last one so the callbacks below run in original order. */
callback_list = acb;
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb,
BP_GET_LOGICAL_BIRTH(zio->io_bp));
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
(void) remove_reference(hdr, hdr);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_prev;
if (acb->acb_wait) {
mutex_enter(&acb->acb_wait_lock);
acb->acb_wait_error = zio->io_error;
acb->acb_wait = B_FALSE;
cv_signal(&acb->acb_wait_cv);
mutex_exit(&acb->acb_wait_lock);
/* acb will be freed by the waiting thread. */
} else {
kmem_free(acb, sizeof (arc_callback_t));
}
}
}
/*
* Lookup the block at the specified DVA (in bp), and return the manner in
* which the block is cached. A zero return indicates not cached.
*/
int
arc_cached(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
uint64_t guid = spa_load_guid(spa);
int flags = 0;
if (BP_IS_EMBEDDED(bp))
return (ARC_CACHED_EMBEDDED);
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return (0);
if (HDR_HAS_L1HDR(hdr)) {
arc_state_t *state = hdr->b_l1hdr.b_state;
/*
		 * We use a switch here to ensure that any future
		 * arc_state_type_t changes are handled; it exists purely
		 * to get more compile-time checking.
*/
switch (state->arcs_state) {
case ARC_STATE_ANON:
break;
case ARC_STATE_MRU:
flags |= ARC_CACHED_IN_MRU | ARC_CACHED_IN_L1;
break;
case ARC_STATE_MFU:
flags |= ARC_CACHED_IN_MFU | ARC_CACHED_IN_L1;
break;
case ARC_STATE_UNCACHED:
/* The header is still in L1, probably not for long */
flags |= ARC_CACHED_IN_L1;
break;
default:
break;
}
}
if (HDR_HAS_L2HDR(hdr))
flags |= ARC_CACHED_IN_L2;
mutex_exit(hash_lock);
return (flags);
}
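/*
 * A minimal usage sketch (hypothetical helper, illustration only): how a
 * caller might use arc_cached() to test whether a block is resident in
 * the primary ARC.  The example_* name is not part of the ARC code.
 */
static inline boolean_t
example_arc_block_in_l1(spa_t *spa, const blkptr_t *bp)
{
	/* arc_cached() returns 0 when the block is not cached at all. */
	return ((arc_cached(spa, bp) & ARC_CACHED_IN_L1) != 0);
}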
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
arc_buf_t *buf = NULL;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/cleaning the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
 * on the hash_lock, always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read".
* Create an anonymous arc buf to back it.
*/
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
boolean_t is_data = !HDR_ISTYPE_METADATA(hdr);
/*
* Verify the block pointer contents are reasonable. This
* should always be the case since the blkptr is protected by
* a checksum.
*/
if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_SKIP,
BLK_VERIFY_LOG)) {
mutex_exit(hash_lock);
rc = SET_ERROR(ECKSUM);
goto done;
}
if (HDR_IO_IN_PROGRESS(hdr)) {
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto done;
}
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
DTRACE_PROBE1(arc__iohit, arc_buf_hdr_t *, hdr);
arc_access(hdr, *arc_flags, B_FALSE);
/*
* If there are multiple threads reading the same block
* and that block is not yet in the ARC, then only one
* thread will do the physical I/O and all other
* threads will wait until that I/O completes.
* Synchronous reads use the acb_wait_cv whereas nowait
* reads register a callback. Both are signalled/called
* in arc_read_done.
*
* Errors of the physical I/O may need to be propagated.
* Synchronous read errors are returned here from
* arc_read_done via acb_wait_error. Nowait reads
* attach the acb_zio_dummy zio to pio and
* arc_read_done propagates the physical I/O's io_error
* to acb_zio_dummy, and thereby to pio.
*/
arc_callback_t *acb = NULL;
if (done || pio || *arc_flags & ARC_FLAG_WAIT) {
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
if (*arc_flags & ARC_FLAG_WAIT) {
acb->acb_wait = B_TRUE;
mutex_init(&acb->acb_wait_lock, NULL,
MUTEX_DEFAULT, NULL);
cv_init(&acb->acb_wait_cv, NULL,
CV_DEFAULT, NULL);
}
acb->acb_zb = *zb;
if (pio != NULL) {
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
}
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb->acb_prev = acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_iohits);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, is_data, data, metadata, iohits);
if (*arc_flags & ARC_FLAG_WAIT) {
mutex_enter(&acb->acb_wait_lock);
while (acb->acb_wait) {
cv_wait(&acb->acb_wait_cv,
&acb->acb_wait_lock);
}
rc = acb->acb_wait_error;
mutex_exit(&acb->acb_wait_lock);
mutex_destroy(&acb->acb_wait_lock);
cv_destroy(&acb->acb_wait_cv);
kmem_free(acb, sizeof (arc_callback_t));
}
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu ||
hdr->b_l1hdr.b_state == arc_uncached);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, *arc_flags, B_TRUE);
if (done && !no_buf) {
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb, hdr->b_birth);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
arc_buf_destroy_impl(buf);
buf = NULL;
(void) remove_reference(hdr, private);
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
}
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, is_data, data, metadata, hits);
*arc_flags |= ARC_FLAG_CACHED;
goto done;
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
if (hash_lock != NULL)
mutex_exit(hash_lock);
rc = SET_ERROR(ENOENT);
goto done;
}
/*
* Verify the block pointer contents are reasonable. This
* should always be the case since the blkptr is protected by
* a checksum.
*/
if (!zfs_blkptr_verify(spa, bp,
(zio_flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
if (hash_lock != NULL)
mutex_exit(hash_lock);
rc = SET_ERROR(ECKSUM);
goto done;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
hdr = arc_hdr_alloc(guid, psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_GET_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
arc_callback_t *acb = kmem_zalloc(
sizeof (arc_callback_t), KM_SLEEP);
acb->acb_wait = B_TRUE;
mutex_init(&acb->acb_wait_lock, NULL,
MUTEX_DEFAULT, NULL);
cv_init(&acb->acb_wait_cv, NULL, CV_DEFAULT,
NULL);
acb->acb_zio_head =
hdr->b_l1hdr.b_acb->acb_zio_head;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb->acb_prev = acb;
hdr->b_l1hdr.b_acb = acb;
mutex_exit(hash_lock);
mutex_enter(&acb->acb_wait_lock);
while (acb->acb_wait) {
cv_wait(&acb->acb_wait_cv,
&acb->acb_wait_lock);
}
mutex_exit(&acb->acb_wait_lock);
mutex_destroy(&acb->acb_wait_lock);
cv_destroy(&acb->acb_wait_cv);
kmem_free(acb, sizeof (arc_callback_t));
goto top;
}
}
if (*arc_flags & ARC_FLAG_UNCACHED) {
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
if (!encrypted_read)
alloc_flags |= ARC_HDR_ALLOC_LINEAR;
}
/*
		 * Take an additional reference for IO_IN_PROGRESS.  It stops
		 * arc_access() from putting this header, which has no buffers
		 * and thus no other references but is obviously non-evictable,
		 * onto the evictable list of the MRU or MFU state.
*/
add_reference(hdr, hdr);
if (!embedded_bp)
arc_access(hdr, *arc_flags, B_FALSE);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
arc_hdr_alloc_abd(hdr, alloc_flags);
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
+ acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(spa, size, 1, 0);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
hdr->b_l2hdr.b_hits++;
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
done:
if (done)
done(NULL, zb, bp, buf, private);
if (pio && rc != 0) {
zio_t *zio = zio_null(pio, spa, NULL, NULL, NULL, zio_flags);
zio->io_error = rc;
zio_nowait(zio);
}
goto out;
}
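/*
 * A minimal usage sketch (hypothetical helper, illustration only): a
 * synchronous read through the interface documented above, using the
 * generic arc_getbuf_func() callback to capture the buffer.  The spa,
 * block pointer and bookmark are assumed to come from the caller; the
 * example_* name is not part of the ARC code.
 */
static int
example_arc_read_wait(spa_t *spa, const blkptr_t *bp,
    const zbookmark_phys_t *zb, arc_buf_t **bufp)
{
	arc_flags_t aflags = ARC_FLAG_WAIT;
	int rc;

	*bufp = NULL;
	rc = arc_read(NULL, spa, bp, arc_getbuf_func, bufp,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
	/*
	 * On success *bufp holds the cached data; it is released later
	 * with arc_buf_destroy(*bufp, bufp).
	 */
	return (rc);
}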
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
/*
 * Helper function for arc_prune_async(); it is responsible for safely
* handling the execution of a registered arc_prune_func_t.
*/
static void
arc_prune_task(void *ptr)
{
arc_prune_t *ap = (arc_prune_t *)ptr;
arc_prune_func_t *func = ap->p_pfunc;
if (func != NULL)
func(ap->p_adjust, ap->p_private);
(void) zfs_refcount_remove(&ap->p_refcnt, func);
}
/*
* Notify registered consumers they must drop holds on a portion of the ARC
* buffers they reference. This provides a mechanism to ensure the ARC can
* honor the metadata limit and reclaim otherwise pinned ARC buffers.
*
* This operation is performed asynchronously so it may be safely called
* in the context of the arc_reclaim_thread(). A reference is taken here
* for each registered arc_prune_t and the arc_prune_task() is responsible
* for releasing it once the registered arc_prune_func_t has completed.
*/
static void
arc_prune_async(uint64_t adjust)
{
arc_prune_t *ap;
mutex_enter(&arc_prune_mtx);
for (ap = list_head(&arc_prune_list); ap != NULL;
ap = list_next(&arc_prune_list, ap)) {
if (zfs_refcount_count(&ap->p_refcnt) >= 2)
continue;
zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
ap->p_adjust = adjust;
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
ap, TQ_SLEEP) == TASKQID_INVALID) {
(void) zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
continue;
}
ARCSTAT_BUMP(arcstat_prune);
}
mutex_exit(&arc_prune_mtx);
}
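/*
 * A minimal usage sketch (hypothetical helpers, illustration only): how a
 * consumer might register a prune callback with the machinery above.  The
 * example_* names are not part of the ARC code, and the callback's
 * uint64_t/void * parameters are assumed from how arc_prune_task() and
 * arc_prune_async() invoke and fill it in above.
 */
static void
example_prune_cb(uint64_t nr_to_prune, void *priv)
{
	/* A real consumer would drop up to nr_to_prune holds here. */
	(void) nr_to_prune, (void) priv;
}

static arc_prune_t *
example_prune_register(void *consumer_state)
{
	/* Pair with arc_remove_prune_callback() at consumer teardown. */
	return (arc_add_prune_callback(example_prune_cb, consumer_state));
}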
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or has some other reference (i.e. a dedup-ed,
* dmu_sync-ed block). A block may also have a reference if it is
* part of a dedup-ed, dmu_synced write. The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) ||
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
 * It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
ASSERT(ARC_BUF_LAST(buf));
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
 * held; we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_buf != buf || !ARC_BUF_LAST(buf)) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
VERIFY3S(remove_reference(hdr, tag), >, 0);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (ARC_BUF_SHARED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, 0);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size[type],
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
(void) zfs_refcount_add_many(&arc_anon->arcs_size[type],
arc_buf_size(buf), buf);
} else {
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
arc_change_state(arc_anon, hdr);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
return (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
return (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
/*
* If we're reexecuting this zio because the pool suspended, then
 * clean up any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (ARC_BUF_SHARED(buf)) {
arc_unshare_buf(hdr, buf);
} else {
ASSERT(!arc_buf_is_shared(buf));
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr)) {
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
add_reference(hdr, hdr); /* For IO_IN_PROGRESS. */
}
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_PROTECTED);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
 * but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (!(HDR_UNCACHED(hdr) ||
abd_size_alloc_linear(arc_buf_size(buf))) ||
!arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
 * user may have disabled compressed ARC, so we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_USE_RESERVE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
ASSERT(ARC_BUF_LAST(buf));
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_GET_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
VERIFY3S(remove_reference(hdr, hdr), >, 0);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, 0, B_FALSE);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
VERIFY3S(remove_reference(hdr, hdr), >, 0);
}
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t uncached, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *done,
void *private, zio_priority_t priority, int zio_flags,
const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
if (uncached)
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
else if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt,
ZIO_DATA_SALT_LEN);
memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv,
ZIO_DATA_IV_LEN);
memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
 * The hdr's b_pabd is now stale; free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (ARC_BUF_SHARED(buf)) {
arc_unshare_buf(hdr, buf);
} else {
ASSERT(!arc_buf_is_shared(buf));
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_done, callback, priority, zio_flags, zb);
return (zio);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)
(zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]) -
arc_loaned_bytes), 0);
/*
* Writes will, almost always, require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
	 * data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
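	/*
	 * Illustrative sketch (all numbers below are assumptions for the
	 * example, not values taken from the code): with rarc_c = 8 GiB,
	 * zfs_arc_dirty_limit_percent = 50, zfs_arc_anon_limit_percent = 25
	 * and zfs_arc_pool_dirty_percent = 20, a write is throttled only if
	 * total_dirty exceeds 4 GiB, anon_size exceeds 2 GiB and this pool's
	 * own dirty data exceeds one fifth of anon_size:
	 *
	 *	throttle = total_dirty > 4G && anon_size > 2G &&
	 *	    spa_dirty_anon > anon_size / 5;
	 */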
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *data, kstat_named_t *metadata,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
data->value.ui64 =
zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]);
metadata->value.ui64 =
zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
size->value.ui64 = data->value.ui64 + metadata->value.ui64;
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_iohits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_iohits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_iohits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_iohits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_iohits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_uncached_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncached_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_elements.value.ui64 =
as->arcstat_hash_elements_max.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_elements);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
wmsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_data,
&as->arcstat_anon_metadata,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_data,
&as->arcstat_mru_metadata,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_data,
&as->arcstat_mru_ghost_metadata,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_data,
&as->arcstat_mfu_metadata,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_data,
&as->arcstat_mfu_ghost_metadata,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
arc_kstat_update_state(arc_uncached,
&as->arcstat_uncached_size,
&as->arcstat_uncached_data,
&as->arcstat_uncached_metadata,
&as->arcstat_uncached_evictable_data,
&as->arcstat_uncached_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
wmsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_predictive_prefetch);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_iohit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_iohit_predictive_prefetch);
as->arcstat_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_prescient_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_demand_iohit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_iohit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
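/*
 * Usage note (illustrative): the snapshot filled in above is what userland
 * consumers such as the arcstat and arc_summary tools read. On FreeBSD the
 * same counters typically surface under the kstat sysctl tree, e.g.:
 *
 *	# sysctl kstat.zfs.misc.arcstats.hits kstat.zfs.misc.arcstats.misses
 */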
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
	 * The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
	 * has a power-of-two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context a full 64-bit
	 * division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
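/*
 * For example (purely illustrative values): with 32 sublists and a
 * buf_hash() result of 0x9e3779b97f4a7c15, the header lands in sublist
 * (unsigned int)0x7f4a7c15 % 32 = 21, and recomputing the same hash on
 * removal finds the same sublist without the index ever being stored.
 */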
static unsigned int
arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj)
{
panic("Header %p insert into arc_l2c_only %p", obj, ml);
}
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
(#tuning), (u_longlong_t)(value)); \
} \
} while (0)
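/*
 * Example (for illustration only): if the user sets zfs_arc_min above
 * arc_c_max, arc_tuning_update() below leaves arc_c_min unchanged, and
 *
 *	WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, B_TRUE);
 *
 * then logs "ignoring tunable zfs_arc_min (using <arc_c_min> instead)".
 */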
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
if (arc_dnode_limit > arc_c_max)
arc_dnode_limit = arc_c_max;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 0 - <all physical memory> */
arc_dnode_limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_c_max / 100;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_limit, verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
		arc_no_grow_shift = MIN(arc_no_grow_shift,
		    arc_shrink_shift - 1);
}
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if (zfs_arc_lotsfree_percent <= 100)
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(zfs_arc_sys_free, allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
static void
arc_state_multilist_init(multilist_t *ml,
multilist_sublist_index_func_t *index_func, int *maxcountp)
{
multilist_create(ml, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), index_func);
*maxcountp = MAX(*maxcountp, multilist_get_num_sublists(ml));
}
static void
arc_state_init(void)
{
int num_sublists = 0;
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated. Special index function asserts that.
*/
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
arc_state_l2c_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
arc_state_l2c_multilist_index_func, &num_sublists);
/*
* Keep track of the number of markers needed to reclaim buffers from
* any ARC state. The markers will be pre-allocated so as to minimize
* the number of memory allocations performed by the eviction thread.
*/
arc_state_evict_marker_count = num_sublists;
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_METADATA]);
wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA], 0);
wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA], 0);
wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA], 0);
wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA], 0);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_iohits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_iohits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_iohits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_iohits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_iohits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_uncached_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_elements, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
wmsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
wmsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_iohit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_iohit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
arc_uncached->arcs_state = ARC_STATE_UNCACHED;
}
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_iohits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_iohits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_iohits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_iohits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_iohits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_uncached_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_elements);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
wmsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
wmsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_iohit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_iohit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
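/*
 * Worked example (the machine size is assumed for illustration): with
 * allmem = 16 GiB and SPA_MAXBLOCKSHIFT at its usual value of 24,
 *
 *	arc_c_min = MAX(16G / 32, 2ULL << 24) = MAX(512M, 32M) = 512M
 *
 * while arc_c_max is left to the platform-specific arc_default_max().
 */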
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifdef _KERNEL
/*
* If zfs_arc_max is non-zero at init, meaning it was set in the kernel
	 * environment before the module was loaded, don't block setting the
	 * maximum just because it is less than arc_c_min; instead, reset
	 * arc_c_min to a lower value.
* zfs_arc_min will be handled by arc_tuning_update().
*/
if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX &&
zfs_arc_max < allmem) {
arc_c_max = zfs_arc_max;
if (arc_c_min >= arc_c_max) {
arc_c_min = MAX(zfs_arc_max / 2,
2ULL << SPA_MAXBLOCKSHIFT);
}
}
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
/*
* 32-bit fixed point fractions of metadata from total ARC size,
* MRU data from all data and MRU metadata from all metadata.
*/
arc_meta = (1ULL << 32) / 4; /* Metadata is 25% of arc_c. */
arc_pd = (1ULL << 32) / 2; /* Data MRU is 50% of data. */
arc_pm = (1ULL << 32) / 2; /* Metadata MRU is 50% of metadata. */
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_limit = arc_c_max * percent / 100;
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", zfs_arc_prune_task_threads,
defclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
list_create(&arc_async_flush_list, sizeof (arc_async_flush_t),
offsetof(arc_async_flush_t, af_node));
mutex_init(&arc_async_flush_lock, NULL, MUTEX_DEFAULT, NULL);
arc_flush_taskq = taskq_create("arc_flush", MIN(boot_ncpus, 4),
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_state_evict_markers =
arc_state_alloc_markers(arc_state_evict_marker_count);
arc_evict_zthr = zthr_create_timer("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL, SEC2NSEC(1), defclsyspri);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
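	/*
	 * Worked example (using the defaults named above, for illustration):
	 * on a 64-bit machine with allmem = 16 GiB,
	 *
	 *	zfs_dirty_data_max_max = MIN(4G, 16G * 25 / 100) = 4G
	 *	zfs_dirty_data_max     = MIN(16G * 10 / 100, 4G) = 1.6G
	 */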
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
if (zfs_wrlog_data_max == 0) {
/*
* dp_wrlog_total is reduced for each txg at the end of
* spa_sync(). However, dp_dirty_total is reduced every time
* a block is written out. Thus under normal operation,
* dp_wrlog_total could grow 2 times as big as
* zfs_dirty_data_max.
*/
zfs_wrlog_data_max = zfs_dirty_data_max * 2;
}
}
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Wait for any background flushes */
taskq_wait(arc_flush_taskq);
taskq_destroy(arc_flush_taskq);
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
list_destroy(&arc_async_flush_list);
mutex_destroy(&arc_async_flush_lock);
mutex_enter(&arc_prune_mtx);
while ((p = list_remove_head(&arc_prune_list)) != NULL) {
(void) zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
arc_state_free_markers(arc_state_evict_markers,
arc_state_evict_marker_count);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
	 * buf_fini() must precede arc_state_fini() because buf_fini() may
	 * trigger the release of kmem magazines, which can call back to
	 * arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
 * substantially lower read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
 * write buffers back to disk-based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write a "l2arc log block",
* which is an additional piece of metadata which describes what's been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are 2 linked-lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved, but that is an optimization rather
* than for correctness. The log block also includes a pointer to the
* previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
 * As can be seen in the diagram, rather than using a simple linked list,
 * we use a pair of linked lists with alternating elements. This is a
 * performance enhancement: with a single list we would only learn the
 * address of the next log block once the current block had been
 * completely read in, keeping the device's I/O queue only one operation
 * deep and incurring a large amount of I/O round-trip latency. Having
 * two lists allows us to fetch two log blocks ahead of where we are
 * currently rebuilding L2ARC buffers.
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and simply writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
 * construction, we cannot go back and amend already-written log blocks to
 * remove buffers which were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA with the
* birth TXG uniquely identify a block in space and time - once created,
 * a block is immutable on disk. The worst we can do is waste some time
 * and memory at l2arc rebuild reconstructing outdated ARC entries that
 * will get dropped from the l2arc as it is being updated
* with new blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
uint64_t size;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "l2arc_write_max must be greater than zero, "
"resetting it to the default (%d)", L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
/* We need to add in the worst case scenario of log block overhead. */
size += l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
		 * Trim ahead of the write size by 64MB or (l2arc_trim_ahead/100)
		 * times the write size, whichever is greater.
*/
size += MAX(64 * 1024 * 1024,
(size * l2arc_trim_ahead) / 100);
}
/*
* Make sure the write size does not exceed the size of the cache
* device. This is important in l2arc_evict(), otherwise infinite
* iteration can occur.
*/
size = MIN(size, (dev->l2ad_end - dev->l2ad_start) / 4);
size = P2ROUNDUP(size, 1ULL << dev->l2ad_vdev->vdev_ashift);
return (size);
}
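/*
 * Worked example (every input below is assumed, for illustration): with
 * l2arc_write_max = 8M, l2arc_write_boost = 8M, a still-cold ARC
 * (arc_warm == B_FALSE), TRIM disabled and 1M of worst-case log block
 * overhead, l2arc_write_size() above computes
 *
 *	size = 8M + 8M + 1M = 17M
 *
 * which is then capped at a quarter of the cache device and rounded up
 * to the vdev ashift.
 */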
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
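/*
 * Illustrative sketch (tunable values assumed): with hz = 1000,
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200, a feed cycle that
 * wrote more than half of what it wanted is rescheduled ~200ms after it
 * began, otherwise ~1s later:
 *
 *	interval = busy ? (1000 * 200) / 1000 : 1000 * 1;
 *	next = MAX(now, MIN(now + interval, began + interval));
 */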
static boolean_t
l2arc_dev_invalid(const l2arc_dev_t *dev)
{
/*
* We want to skip devices that are being rebuilt, trimmed,
* removed, or belong to a spa that is being exported.
*/
return (dev->l2ad_vdev == NULL || vdev_is_dead(dev->l2ad_vdev) ||
dev->l2ad_rebuild || dev->l2ad_trim_all ||
dev->l2ad_spa == NULL || dev->l2ad_spa->spa_is_exporting);
}
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
ASSERT3P(next, !=, NULL);
} while (l2arc_dev_invalid(next));
/* if we were unable to find any usable vdevs, return NULL */
if (l2arc_dev_invalid(next))
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
l2arc_data_free_t *df;
mutex_enter(&l2arc_free_on_write_mtx);
while ((df = list_remove_head(l2arc_free_on_write)) != NULL) {
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
		 * longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
ASSERT(dev->l2ad_vdev != NULL);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
(void) zfs_refcount_remove(&dev->l2ad_lb_count,
lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
memset(l2dhdr, 0,
dev->l2ad_dev_hdr_asize);
} else {
memset(&l2dhdr->dh_start_lbps[i], 0,
sizeof (l2arc_log_blkptr_t));
}
break;
}
memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_USE_RESERVE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_USE_RESERVE);
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, cabd, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because zio is either re-used for
* an I/O of the block in the case of the error
* or the zio is passed to arc_read_done() and it
* needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked sublist of the
* selected list.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock_idx(ml, idx));
}
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
*/
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
ASSERT(dev->l2ad_vdev != NULL);
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
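/*
 * One potential log entry is needed per SPA_MINBLOCKSIZE of payload;
 * round the block count up, since a partially filled log block still
 * occupies a whole aligned block on the device.
 */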
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
ASSERT(vd != NULL || all);
ASSERT(dev->l2ad_spa != NULL || all);
buflist = &dev->l2ad_buflist;
top:
rerun = B_FALSE;
if (dev->l2ad_hand + distance > dev->l2ad_end) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_range() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_ranges() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (ie
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
if (vd != NULL)
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
(void) zfs_refcount_remove(&dev->l2ad_lb_count,
lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* We need to check if we evict all buffers, otherwise we may iterate
* unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <=, dev->l2ad_end);
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
}
}
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it and
* copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr)) {
ASSERT3U(asize, >, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (asize > size)
abd_zero_off(to_write, size, asize - size);
goto out;
}
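/*
 * The header is marked as compressed on disk but ARC compression is
 * disabled, so b_pabd holds uncompressed data. Re-compress it to
 * reproduce the on-disk representation before writing to the L2ARC.
 */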
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
cabd = abd_alloc_for_io(MAX(size, asize), ismd);
uint64_t csize = zio_compress_data(compress, to_write, &cabd,
size, MIN(size, psize), hdr->b_complevel);
if (csize >= size || csize > psize) {
/*
* We can't re-compress the block into the original
* psize. Even if it fits into asize, it does not
* matter, since checksum will never match on read.
*/
abd_free(cabd);
return (SET_ERROR(EIO));
}
if (asize > csize)
abd_zero_off(cabd, csize, asize - csize);
to_write = cabd;
}
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* When compressed ARC is enabled, the scan headroom is additionally
* scaled by the l2arc_headroom_boost tunable.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *head, *marker;
uint64_t write_asize, write_psize, headroom;
boolean_t full, from_head = !arc_warm;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
marker = arc_state_alloc_marker();
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* pass == 0: MFU meta
* pass == 1: MRU meta
* pass == 2: MFU data
* pass == 3: MRU data
*/
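/*
 * l2arc_mfuonly == 1 feeds from the MFU lists only (both MRU
 * passes are skipped); values greater than 1 also allow MRU
 * metadata but still skip MRU data.
 */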
if (l2arc_mfuonly == 1) {
if (pass == 1 || pass == 3)
continue;
} else if (l2arc_mfuonly > 1) {
if (pass == 3)
continue;
}
uint64_t passed_sz = 0;
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
/*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
ASSERT3P(mls, !=, NULL);
if (from_head)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
while (hdr != NULL) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
skip:
/* Skip this buffer rather than waiting. */
if (from_head)
hdr = multilist_sublist_next(mls, hdr);
else
hdr = multilist_sublist_prev(mls, hdr);
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
goto skip;
}
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
/*
* If the allocated size of this buffer plus the max
* size for the pending log block exceeds the evicted
* target size, terminate writing buffers for this run.
*/
if (write_asize + asize +
sizeof (l2arc_log_blk_phys_t) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We should not sleep with sublist lock held or it
* may block ARC eviction. Insert a marker to save
* the position and drop the lock.
*/
if (from_head) {
multilist_sublist_insert_after(mls, hdr,
marker);
} else {
multilist_sublist_insert_before(mls, hdr,
marker);
}
multilist_sublist_unlock(mls);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
goto next;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
/* l2arc_hdr_arcstats_update() expects a valid asize */
HDR_SET_L2SIZE(hdr, asize);
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR |
ARC_FLAG_L2_WRITING);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
list_insert_head(&dev->l2ad_buflist, head);
}
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
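/*
 * Record this header in the open log block. l2arc_log_blk_insert()
 * returns B_TRUE once the log block is full and must be committed.
 */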
boolean_t commit = l2arc_log_blk_insert(dev, hdr);
mutex_exit(hash_lock);
if (pio == NULL) {
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
wzio = zio_write_phys(pio, dev->l2ad_vdev,
dev->l2ad_hand, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
zio_nowait(wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
if (commit) {
/* l2ad_hand will be adjusted inside. */
write_asize +=
l2arc_log_blk_commit(dev, pio, cb);
}
next:
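/* Re-acquire the sublist lock and resume the scan from the marker. */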
multilist_sublist_lock(mls);
if (from_head)
hdr = multilist_sublist_next(mls, marker);
else
hdr = multilist_sublist_prev(mls, marker);
multilist_sublist_remove(mls, marker);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
arc_state_free_marker(marker);
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_psize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
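/*
 * Returns B_TRUE when L2ARC feeding or rebuilding should back off:
 * either the system needs to reclaim memory, or the in-core L2ARC
 * header footprint exceeds l2arc_meta_percent of arc_c (arc_c_max
 * while the ARC is still warming up).
 */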
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
return (arc_reclaim_needed() ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
static __attribute__((noreturn)) void
l2arc_feed_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
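/*
 * Default to re-checking in one second (hz ticks); a successful
 * feed below replaces this with l2arc_write_interval().
 */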
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
spa_t *spa = dev->l2ad_spa;
/*
* After an l2arc_remove_vdev(), the spa_t will no longer be valid.
*/
if (spa == NULL)
return;
/*
* The L2ARC has to hold at least the payload of one log block for
* them to be restored (persistent L2ARC). The payload of a log block
* depends on the number of its log entries. We always write log blocks
* with 1022 entries. How many of them are committed or restored depends
* on the size of the L2ARC device. Thus the maximum payload of
* one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
* is smaller than that, we reduce the number of committed and restored
* log entries per block so as to enable persistence.
*/
if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
dev->l2ad_log_entries = 0;
} else {
dev->l2ad_log_entries = MIN((dev->l2ad_end -
dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
L2ARC_LOG_BLK_MAX_ENTRIES);
}
/*
* Read the device header; if an error is returned, do not rebuild the L2ARC.
*/
if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
/*
* If we are onlining a cache device (vdev_reopen) that was
* still present (l2arc_vdev_present()) and rebuild is enabled,
* we should evict all ARC buffers and pointers to log blocks
* and reclaim their space before restoring its contents to
* L2ARC.
*/
if (reopen) {
if (!l2arc_rebuild_enabled) {
return;
} else {
l2arc_evict(dev, 0, B_TRUE);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
}
/*
* Just mark the device as pending for a rebuild. We won't
* be starting a rebuild in line here as it would block pool
* import. Instead spa_load_impl will hand that off to an
* async task which will call l2arc_spa_rebuild_start.
*/
dev->l2ad_rebuild = B_TRUE;
} else if (spa_writeable(spa)) {
/*
* In this case TRIM the whole device if l2arc_trim_ahead > 0,
* otherwise create a new header. We zero out the memory holding
* the header to reset dh_start_lbps. If we TRIM the whole
* device the new header will be written by
* vdev_trim_l2arc_thread() at the end of the TRIM to update the
* trim_state in the header too. When reading the header, if
* trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
* we opt to TRIM the whole device again.
*/
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
memset(l2dhdr, 0, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
}
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
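/*
 * The usable region starts after the vdev labels and the device
 * header, and extends to the vdev's minimum allocatable size.
 */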
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
/*
* Decide if dev is eligible for L2ARC rebuild or whole device
* trimming. This has to happen before the device is added to the
* cache device list and l2arc_dev_mtx is released. Otherwise
* l2arc_feed_thread() might already start writing on the
* device.
*/
l2arc_rebuild_dev(adddev, B_FALSE);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
/*
* Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
* in case of onlining a cache device.
*/
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
/*
* In contrast to l2arc_add_vdev() we do not have to worry about
* l2arc_feed_thread() invalidating previous content when onlining a
* cache device. The device parameters (l2ad*) are not cleared when
* offlining the device, and writing new buffers will not invalidate
* all previous content. In the worst case only buffers that have not had
* their log block written to the device will be lost.
* When onlining the cache device (ie offline->online without exporting
* the pool in between) this happens:
* vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
* | |
* vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
* During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
* is set to B_TRUE we might write additional buffers to the device.
*/
l2arc_rebuild_dev(dev, reopen);
}
typedef struct {
l2arc_dev_t *rva_l2arc_dev;
uint64_t rva_spa_gid;
uint64_t rva_vdev_gid;
boolean_t rva_async;
} remove_vdev_args_t;
static void
l2arc_device_teardown(void *arg)
{
remove_vdev_args_t *rva = arg;
l2arc_dev_t *remdev = rva->rva_l2arc_dev;
hrtime_t start_time = gethrtime();
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
uint64_t elapsed = NSEC2MSEC(gethrtime() - start_time);
if (elapsed > 0) {
zfs_dbgmsg("spa %llu, vdev %llu removed in %llu ms",
(u_longlong_t)rva->rva_spa_gid,
(u_longlong_t)rva->rva_vdev_gid,
(u_longlong_t)elapsed);
}
if (rva->rva_async)
arc_async_flush_remove(rva->rva_spa_gid, 2);
kmem_free(rva, sizeof (remove_vdev_args_t));
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
boolean_t asynchronous = spa->spa_state == POOL_STATE_EXPORTED ||
spa->spa_state == POOL_STATE_DESTROYED;
/*
* Find the device by vdev
*/
l2arc_dev_t *remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Save info for final teardown
*/
remove_vdev_args_t *rva = kmem_alloc(sizeof (remove_vdev_args_t),
KM_SLEEP);
rva->rva_l2arc_dev = remdev;
rva->rva_spa_gid = spa_load_guid(spa);
rva->rva_vdev_gid = remdev->l2ad_vdev->vdev_guid;
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
remdev->l2ad_rebuild_cancel = B_TRUE;
if (remdev->l2ad_rebuild_began == B_TRUE) {
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
rva->rva_async = asynchronous;
/*
* Remove device from global list
*/
ASSERT(spa_config_held(spa, SCL_L2ARC, RW_WRITER) & SCL_L2ARC);
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
/* During a pool export spa & vdev will no longer be valid */
if (asynchronous) {
remdev->l2ad_spa = NULL;
remdev->l2ad_vdev = NULL;
}
mutex_exit(&l2arc_dev_mtx);
if (!asynchronous) {
l2arc_device_teardown(rva);
return;
}
arc_async_flush_t *af = arc_async_flush_add(rva->rva_spa_gid, 2);
taskq_dispatch_ent(arc_flush_taskq, l2arc_device_teardown, rva,
TQ_SLEEP, &af->af_tqent);
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
void
l2arc_spa_rebuild_stop(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL)
continue;
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_cancel = B_TRUE;
mutex_exit(&l2arc_rebuild_thr_lock);
}
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL)
continue;
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_began == B_TRUE) {
while (dev->l2ad_rebuild == B_TRUE) {
cv_wait(&l2arc_rebuild_thr_cv,
&l2arc_rebuild_thr_lock);
}
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static __attribute__((noreturn)) void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It
* starts reading the log block chain and restores each block's contents
* to memory (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
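/*
 * Resume the write hand just past the most recently committed log
 * block (dh_start_lbps[0]).
 */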
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that this log block has checked out, we can start
* reconstruction from it.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* log block restored, include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
kpreempt(KPREEMPT_SYNC);
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* The L2ARC config lock is held by somebody as writer,
* possibly because they are trying to remove us. They
* likely want us to shut down, so after a little
* delay we check l2ad_rebuild_cancel and retry
* taking the lock.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
return (err);
} else if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
/*
* Attempts to read the device header of the provided L2ARC device and
* writes it to dev->l2ad_dev_hdr. On success this function returns 0,
* otherwise the appropriate error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* Attempt to rebuild a device containing no actual dev hdr
* or containing a header from some other pool or from another
* version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lbp and next_lbp point to the current and next log block
* address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC blk.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4: {
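/*
 * The log block was written compressed; copy the compressed bytes
 * into a temporary linear abd and decompress them back in place
 * over this_lb.
 */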
abd_t *abd = abd_alloc_linear(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
abd_t dabd;
abd_get_from_buf_struct(&dabd, this_lb, sizeof (*this_lb));
err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, &dabd, asize, sizeof (*this_lb), NULL);
abd_free(&dabd);
abd_free(abd);
if (err != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
}
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
return (err);
}
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* Do all the allocation before grabbing any locks, this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), asize, le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
* L2ARC metadata. If not, enter them and update the flag.
* This is important in case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
/* l2arc_hdr_arcstats_update() expects a valid asize */
HDR_SET_L2SIZE(exists, asize);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
* The returned zio will contain newly allocated memory buffers for the IO
* data which should then be freed by the caller once the zio is no longer
* needed (i.e. due to it having completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
static uint64_t
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
abd_t *abd = NULL;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* Try to compress the buffer; it must save at least one sector to be used. */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, &abd, sizeof (*lb),
zio_get_compression_max_size(ZIO_COMPRESS_LZ4,
dev->l2ad_vdev->vdev_ashift,
dev->l2ad_vdev->vdev_ashift, sizeof (*lb)), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
abd_zero_off(abd, psize, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
abd_copy_from_buf_off(abd, lb, 0, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
abd_fletcher_4_native(abd, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd;
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
return (asize);
}
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
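/*
 * Illustrative example for l2arc_log_blkptr_valid() above (offsets assumed,
 * not taken from the source): with l2ad_hand = 40 GiB and l2ad_evict = 60 GiB,
 * a log block whose range from payload start to the end of the block itself
 * spans 50-55 GiB overlaps the [l2ad_hand, l2ad_evict] window, so it is
 * treated as evicted and only accepted when dev->l2ad_first is set.
 */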
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
memset(le, 0, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l2hdr.b_arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
* just do a range comparison; we need to handle the situation in which the
* range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine if it sits in
* between the top and bottom.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
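/*
 * Worked example for l2arc_range_check_overlap() above (illustrative
 * offsets): in the looped-around case with bottom = 70 GiB and
 * top = 20 GiB, check = 80 GiB and check = 10 GiB both report an
 * overlap, while check = 50 GiB does not, since it falls in the gap
 * between top and bottom.
 */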
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
spl_param_get_u64, ZMOD_RW, "Minimum ARC size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
spl_param_get_u64, ZMOD_RW, "Maximum ARC size in bytes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_balance, UINT, ZMOD_RW,
"Balance between metadata and data on ghost hits.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_uint, ZMOD_RW, "Seconds before growing ARC size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_uint, ZMOD_RW, "log2(fraction of ARC to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim ARC to");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, UINT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed ARC buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_uint, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_uint, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, U64, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, U64, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, U64, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, U64, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, U64, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, U64, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, U64, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, U64, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW,
"Exclude dbufs on special vdevs from being cached to L2ARC if set.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_uint, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_u64,
spl_param_get_u64, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_u64,
spl_param_get_u64, ZMOD_RW, "Minimum bytes of dnodes in ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_int, param_get_uint, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, UINT, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, UINT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, UINT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
"Number of arc_prune threads");
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 90395cad6e45..5212751f9a63 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -1,5442 +1,5442 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>
static kstat_t *dbuf_ksp;
typedef struct dbuf_stats {
/*
* Various statistics about the size of the dbuf cache.
*/
kstat_named_t cache_count;
kstat_named_t cache_size_bytes;
kstat_named_t cache_size_bytes_max;
/*
* Statistics regarding the bounds on the dbuf cache size.
*/
kstat_named_t cache_target_bytes;
kstat_named_t cache_lowater_bytes;
kstat_named_t cache_hiwater_bytes;
/*
* Total number of dbuf cache evictions that have occurred.
*/
kstat_named_t cache_total_evicts;
/*
* The distribution of dbuf levels in the dbuf cache and
* the total size of all dbufs at each level.
*/
kstat_named_t cache_levels[DN_MAX_LEVELS];
kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
/*
* Statistics about the dbuf hash table.
*/
kstat_named_t hash_hits;
kstat_named_t hash_misses;
kstat_named_t hash_collisions;
kstat_named_t hash_elements;
/*
* Number of sublists containing more than one dbuf in the dbuf
* hash table. Keep track of the longest hash chain.
*/
kstat_named_t hash_chains;
kstat_named_t hash_chain_max;
/*
* Number of times a dbuf_create() discovers that a dbuf was
* already created and in the dbuf hash table.
*/
kstat_named_t hash_insert_race;
/*
* Number of entries in the hash table dbuf and mutex arrays.
*/
kstat_named_t hash_table_count;
kstat_named_t hash_mutex_count;
/*
* Statistics about the size of the metadata dbuf cache.
*/
kstat_named_t metadata_cache_count;
kstat_named_t metadata_cache_size_bytes;
kstat_named_t metadata_cache_size_bytes_max;
/*
* For diagnostic purposes, this is incremented whenever we can't add
* something to the metadata cache because it's full, and instead put
* the data in the regular dbuf cache.
*/
kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;
dbuf_stats_t dbuf_stats = {
{ "cache_count", KSTAT_DATA_UINT64 },
{ "cache_size_bytes", KSTAT_DATA_UINT64 },
{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "cache_target_bytes", KSTAT_DATA_UINT64 },
{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
{ "cache_total_evicts", KSTAT_DATA_UINT64 },
{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
{ "hash_hits", KSTAT_DATA_UINT64 },
{ "hash_misses", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "hash_insert_race", KSTAT_DATA_UINT64 },
{ "hash_table_count", KSTAT_DATA_UINT64 },
{ "hash_mutex_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t cache_count;
wmsum_t cache_total_evicts;
wmsum_t cache_levels[DN_MAX_LEVELS];
wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
wmsum_t hash_hits;
wmsum_t hash_misses;
wmsum_t hash_collisions;
wmsum_t hash_elements;
wmsum_t hash_chains;
wmsum_t hash_insert_race;
wmsum_t metadata_cache_count;
wmsum_t metadata_cache_overflow;
} dbuf_sums;
#define DBUF_STAT_INCR(stat, val) \
wmsum_add(&dbuf_sums.stat, val)
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val))
#define DBUF_STAT_BUMP(stat) \
DBUF_STAT_INCR(stat, 1)
#define DBUF_STAT_BUMPDOWN(stat) \
DBUF_STAT_INCR(stat, -1)
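/*
 * Lock-free maximum update: re-read the currently published maximum and
 * retry the compare-and-swap until either the stored value is already at
 * least v or our store succeeds.
 */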
#define DBUF_STAT_MAX(stat, v) { \
uint64_t _m; \
while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
(_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
continue; \
}
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
/*
* Global data structures and functions for the dbuf cache.
*/
static kmem_cache_t *dbuf_kmem_cache;
kmem_cache_t *dbuf_dirty_kmem_cache;
static taskq_t *dbu_evict_taskq;
static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;
/*
* There are two dbuf caches; each dbuf can only be in one of them at a time.
*
* 1. Cache of metadata dbufs, to help make read-heavy administrative commands
* from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
* that represent the metadata that describes filesystems/snapshots/
* bookmarks/properties/etc. We only evict from this cache when we export a
* pool, to short-circuit as much I/O as possible for all administrative
* commands that need the metadata. There is no eviction policy for this
* cache, because we try to only include types in it which would occupy a
* very small amount of space per object but create a large impact on the
* performance of these commands. Instead, after it reaches a maximum size
* (which should only happen on very small memory systems with a very large
* number of filesystem objects), we stop taking new dbufs into the
* metadata cache, instead putting them in the normal dbuf cache.
*
* 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
* are not currently held but have been recently released. These dbufs
* are not eligible for arc eviction until they are aged out of the cache.
* Dbufs that are aged out of the cache will be immediately destroyed and
* become eligible for arc eviction.
*
* Dbufs are added to these caches once the last hold is released. If a dbuf is
* later accessed and still exists in the dbuf cache, then it will be removed
* from the cache and later re-added to the head of the cache.
*
* If a given dbuf meets the requirements for the metadata cache, it will go
* there, otherwise it will be considered for the generic LRU dbuf cache. The
* caches and the refcounts tracking their sizes are stored in an array indexed
* by those caches' matching enum values (from dbuf_cached_state_t).
*/
typedef struct dbuf_cache {
multilist_t cache;
zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;
/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;
static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);
/*
* The LRU dbuf cache uses a three-stage eviction policy:
* - A low water marker designates when the dbuf eviction thread
* should stop evicting from the dbuf cache.
* - When we reach the maximum size (aka mid water mark), we
* signal the eviction thread to run.
* - The high water mark indicates when the eviction thread
* is unable to keep up with the incoming load and eviction must
* happen in the context of the calling thread.
*
* The dbuf cache:
* (max size)
* low water mid water hi water
* +----------------------------------------+----------+----------+
* | | | |
* | | | |
* | | | |
* | | | |
* +----------------------------------------+----------+----------+
* stop signal evict
* evicting eviction directly
* thread
*
* The high and low water marks indicate the operating range for the eviction
* thread. The low water mark is, by default, 90% of the total size of the
* cache and the high water mark is at 110% (both of these percentages can be
* changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
* respectively). The eviction thread will try to ensure that the cache remains
* within this range by waking up every second and checking if the cache is
* above the low water mark. The thread can also be woken up by callers adding
* elements into the cache if the cache is larger than the mid water (i.e. max
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
* water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
/*
* The percentage above and below the maximum cache size.
*/
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;
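/*
 * For example, with the default percentages and an assumed 100 MiB target
 * size, the eviction thread stops evicting at the 90 MiB low water mark, is
 * signalled once the cache exceeds the 100 MiB target, and callers begin
 * evicting directly above the 110 MiB high water mark.
 */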
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
memset(db, 0, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
rw_init(&db->db_rwlock, NULL, RW_NOLOCKDEP, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
zfs_refcount_create(&db->db_holds);
return (0);
}
static void
dbuf_dest(void *vdb, void *unused)
{
(void) unused;
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
rw_destroy(&db->db_rwlock);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
zfs_refcount_destroy(&db->db_holds);
}
/*
* dbuf hash table routines
*/
static dbuf_hash_table_t dbuf_hash_table;
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
#define DTRACE_SET_STATE(db, why) \
DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
const char *, why)
#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
((dbuf)->db.db_object == (obj) && \
(dbuf)->db_objset == (os) && \
(dbuf)->db_level == (level) && \
(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
uint64_t *hash_out)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv;
uint64_t idx;
dmu_buf_impl_t *db;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (db);
}
mutex_exit(&db->db_mtx);
}
}
mutex_exit(DBUF_HASH_MUTEX(h, idx));
if (hash_out != NULL)
*hash_out = hv;
return (NULL);
}
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
dnode_t *dn;
dmu_buf_impl_t *db = NULL;
if (dnode_hold(os, object, FTAG, &dn) == 0) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus != NULL) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
return (db);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
*/
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
uint64_t blkid, idx;
dmu_buf_impl_t *dbf;
uint32_t i;
blkid = db->db_blkid;
ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
idx = db->db_hash & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
dbf = dbf->db_hash_next, i++) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
mutex_enter(&dbf->db_mtx);
if (dbf->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (dbf);
}
mutex_exit(&dbf->db_mtx);
}
}
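/*
 * At this point i is the number of entries already present in this
 * bucket; use it to track collision and chain-length statistics.
 */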
if (i > 0) {
DBUF_STAT_BUMP(hash_collisions);
if (i == 1)
DBUF_STAT_BUMP(hash_chains);
DBUF_STAT_MAX(hash_chain_max, i);
}
mutex_enter(&db->db_mtx);
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
DBUF_STAT_BUMP(hash_elements);
return (NULL);
}
/*
* This returns whether this dbuf should be stored in the metadata cache, which
* is based on whether it's from one of the dnode types that store data related
* to traversing dataset hierarchies.
*/
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
DB_DNODE_ENTER(db);
dmu_object_type_t type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
/* Check if this dbuf is one of the types we care about */
if (DMU_OT_IS_METADATA_CACHED(type)) {
/* If we hit this, then we set something up wrong in dmu_ot */
ASSERT(DMU_OT_IS_METADATA(type));
/*
* Sanity check for small-memory systems: don't allocate too
* much memory for this purpose.
*/
if (zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
dbuf_metadata_cache_target_bytes()) {
DBUF_STAT_BUMP(metadata_cache_overflow);
return (B_FALSE);
}
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Remove an entry from the hash table. It must be in the EVICTING state.
*/
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t idx;
dmu_buf_impl_t *dbf, **dbp;
ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
db->db_blkid), ==, db->db_hash);
idx = db->db_hash & h->hash_table_mask;
/*
* We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
*/
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_state == DB_EVICTING);
ASSERT(!MUTEX_HELD(&db->db_mtx));
mutex_enter(DBUF_HASH_MUTEX(h, idx));
dbp = &h->hash_table[idx];
while ((dbf = *dbp) != db) {
dbp = &dbf->db_hash_next;
ASSERT(dbf != NULL);
}
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
if (h->hash_table[idx] &&
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
mutex_exit(DBUF_HASH_MUTEX(h, idx));
DBUF_STAT_BUMPDOWN(hash_elements);
}
typedef enum {
DBVU_EVICTING,
DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
int64_t holds;
if (db->db_user == NULL)
return;
/* Only data blocks support the attachment of user data. */
ASSERT(db->db_level == 0);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
ASSERT3U(db->db_state, ==, DB_CACHED);
holds = zfs_refcount_count(&db->db_holds);
if (verify_type == DBVU_EVICTING) {
/*
* Immediate eviction occurs when holds == dirtycnt.
* For normal eviction buffers, holds is zero on
* eviction, except when dbuf_fix_old_data() calls
* dbuf_clear_data(). However, the hold count can grow
* during eviction even though db_mtx is held (see
* dmu_bonus_hold() for an example), so we can only
* test the generic invariant that holds >= dirtycnt.
*/
ASSERT3U(holds, >=, db->db_dirtycnt);
} else {
if (db->db_user_immediate_evict == TRUE)
ASSERT3U(holds, >=, db->db_dirtycnt);
else
ASSERT3U(holds, >, 0);
}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (dbu == NULL)
return;
dbuf_verify_user(db, DBVU_EVICTING);
db->db_user = NULL;
#ifdef ZFS_DEBUG
if (dbu->dbu_clear_on_evict_dbufp != NULL)
*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif
if (db->db_caching_status != DB_NO_CACHE) {
/*
* This is a cached dbuf, so the size of the user data is
* included in its cached amount. We adjust it here because the
* user data has already been detached from the dbuf, and the
* sync functions are not supposed to touch it (the dbuf might
* not exist anymore by the time the sync functions run).
*/
uint64_t size = dbu->dbu_size;
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size, size, dbu);
if (db->db_caching_status == DB_DBUF_CACHE)
DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
}
/*
* There are two eviction callbacks - one that we call synchronously
* and one that we invoke via a taskq. The async one is useful for
* avoiding lock order reversals and limiting stack depth.
*
* Note that if we have a sync callback but no async callback,
* it's likely that the sync callback will free the structure
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
if (has_async) {
taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
dbu, 0, &dbu->dbu_tqent);
}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
/*
* Consider indirect blocks and spill blocks to be meta data.
*/
if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
return (B_TRUE);
} else {
boolean_t is_metadata;
DB_DNODE_ENTER(db);
is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
DB_DNODE_EXIT(db);
return (is_metadata);
}
}
/*
* We want to exclude buffers that are on a special allocation class from
* L2ARC.
*/
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db, blkptr_t *bp)
{
if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
(db->db_objset->os_secondary_cache ==
ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
/*
* bp must be checked in the event it was passed from
* dbuf_read_impl() as the result of the BP being set from
* a Direct I/O write in dbuf_read(). See comments in
* dbuf_read().
*/
blkptr_t *db_bp = bp == NULL ? db->db_blkptr : bp;
if (db_bp == NULL || BP_IS_HOLE(db_bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(db_bp->blk_dva);
vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
(dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
(level > 0 ||
DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
if (bp == NULL || BP_IS_HOLE(bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the dbuf eviction
* code is laid out; dbuf_evict_thread() assumes dbufs are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;
/*
* The assumption here is that the hash value for a given
* dmu_buf_impl_t will remain constant throughout its lifetime
* (i.e. its objset, object, level and blkid fields don't change).
* Thus, we don't need to store the dbuf's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid) %
multilist_get_num_sublists(ml));
}
/*
* The target size of the dbuf cache can grow with the ARC target,
* unless limited by the tunable dbuf_cache_max_bytes.
*/
static inline unsigned long
dbuf_cache_target_bytes(void)
{
return (MIN(dbuf_cache_max_bytes,
arc_target_bytes() >> dbuf_cache_shift));
}
/*
* The target size of the dbuf metadata cache can grow with the ARC target,
* unless limited by the tunable dbuf_metadata_cache_max_bytes.
*/
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
return (MIN(dbuf_metadata_cache_max_bytes,
arc_target_bytes() >> dbuf_metadata_cache_shift));
}
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target +
(dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}
static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target -
(dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
static inline boolean_t
dbuf_cache_above_lowater(void)
{
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
dbuf_cache_lowater_bytes());
}
/*
* Evict the oldest eligible dbuf from the dbuf cache.
*/
static void
dbuf_evict_one(void)
{
int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
multilist_sublist_t *mls = multilist_sublist_lock_idx(
&dbuf_caches[DB_DBUF_CACHE].cache, idx);
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
dmu_buf_impl_t *db = multilist_sublist_tail(mls);
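/*
 * Walk from the tail (the oldest entry) toward the head, skipping any
 * dbuf whose mutex is currently held by another thread.
 */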
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
}
DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
multilist_sublist_t *, mls);
if (db != NULL) {
multilist_sublist_remove(mls, db);
multilist_sublist_unlock(mls);
uint64_t size = db->db.db_size;
uint64_t usize = dmu_buf_user_size(&db->db);
(void) zfs_refcount_remove_many(
&dbuf_caches[DB_DBUF_CACHE].size, size, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[DB_DBUF_CACHE].size, usize, db->db_user);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size + usize);
ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
db->db_caching_status = DB_NO_CACHE;
dbuf_destroy(db);
DBUF_STAT_BUMP(cache_total_evicts);
} else {
multilist_sublist_unlock(mls);
}
}
/*
* The dbuf evict thread is responsible for aging out dbufs from the
* cache. Once the cache has reached its maximum size, dbufs are removed
* and destroyed. The eviction thread will continue running until the size
* of the dbuf cache is at or below the maximum size. Once the dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
mutex_enter(&dbuf_evict_lock);
while (!dbuf_evict_thread_exit) {
while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
&dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
/*
* Keep evicting as long as we're above the low water mark
* for the cache. We do this without holding the locks to
* minimize lock contention.
*/
while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
dbuf_evict_one();
}
mutex_enter(&dbuf_evict_lock);
}
dbuf_evict_thread_exit = B_FALSE;
cv_broadcast(&dbuf_evict_cv);
CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
thread_exit();
}
/*
* Wake up the dbuf eviction thread if the dbuf cache is at its max size.
* If the dbuf cache is at its high water mark, then evict a dbuf from the
* dbuf cache using the caller's context.
*/
static void
dbuf_evict_notify(uint64_t size)
{
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
* and grabbing the lock results in massive lock contention.
*/
if (size > dbuf_cache_target_bytes()) {
if (size > dbuf_cache_hiwater_bytes())
dbuf_evict_one();
cv_signal(&dbuf_evict_cv);
}
}
static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
dbuf_hash_table_t *h = &dbuf_hash_table;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
ds->cache_count.value.ui64 =
wmsum_value(&dbuf_sums.cache_count);
ds->cache_size_bytes.value.ui64 =
zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
ds->cache_total_evicts.value.ui64 =
wmsum_value(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
ds->cache_levels[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels[i]);
ds->cache_levels_bytes[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
}
ds->hash_hits.value.ui64 =
wmsum_value(&dbuf_sums.hash_hits);
ds->hash_misses.value.ui64 =
wmsum_value(&dbuf_sums.hash_misses);
ds->hash_collisions.value.ui64 =
wmsum_value(&dbuf_sums.hash_collisions);
ds->hash_elements.value.ui64 =
wmsum_value(&dbuf_sums.hash_elements);
ds->hash_chains.value.ui64 =
wmsum_value(&dbuf_sums.hash_chains);
ds->hash_insert_race.value.ui64 =
wmsum_value(&dbuf_sums.hash_insert_race);
ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
ds->metadata_cache_count.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_count);
ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
ds->metadata_cache_overflow.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_overflow);
return (0);
}
void
dbuf_init(void)
{
uint64_t hmsize, hsize = 1ULL << 16;
dbuf_hash_table_t *h = &dbuf_hash_table;
/*
* The hash table is big enough to fill one eighth of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
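/*
 * For example (assuming the default 8K average block size), a machine with
 * 64 GiB of memory settles on 2^20 buckets below, i.e. an 8 MiB hash table
 * with 8-byte pointers, provided the allocation succeeds on the first try.
 */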
while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
hsize <<= 1;
h->hash_table = NULL;
while (h->hash_table == NULL) {
h->hash_table_mask = hsize - 1;
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
if (h->hash_table == NULL)
hsize >>= 1;
ASSERT3U(hsize, >=, 1ULL << 10);
}
/*
* The hash table buckets are protected by an array of mutexes where
* each mutex is responsible for protecting 128 buckets. A minimum
* array size of 8192 is targeted to avoid contention.
*/
if (dbuf_mutex_cache_shift == 0)
hmsize = MAX(hsize >> 7, 1ULL << 13);
else
hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
h->hash_mutexes = NULL;
while (h->hash_mutexes == NULL) {
h->hash_mutex_mask = hmsize - 1;
h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
KM_SLEEP);
if (h->hash_mutexes == NULL)
hmsize >>= 1;
}
dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
sizeof (dmu_buf_impl_t),
0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
dbuf_dirty_kmem_cache = kmem_cache_create("dbuf_dirty_record_t",
sizeof (dbuf_dirty_record_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
for (int i = 0; i < hmsize; i++)
mutex_init(&h->hash_mutexes[i], NULL, MUTEX_NOLOCKDEP, NULL);
dbuf_stats_init(h);
/*
* All entries are queued via taskq_dispatch_ent(), so min/maxalloc
* configuration is not required.
*/
dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
multilist_create(&dbuf_caches[dcs].cache,
sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_cache_link),
dbuf_cache_multilist_index_func);
zfs_refcount_create(&dbuf_caches[dcs].size);
}
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
wmsum_init(&dbuf_sums.cache_count, 0);
wmsum_init(&dbuf_sums.cache_total_evicts, 0);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_init(&dbuf_sums.cache_levels[i], 0);
wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
}
wmsum_init(&dbuf_sums.hash_hits, 0);
wmsum_init(&dbuf_sums.hash_misses, 0);
wmsum_init(&dbuf_sums.hash_collisions, 0);
wmsum_init(&dbuf_sums.hash_elements, 0);
wmsum_init(&dbuf_sums.hash_chains, 0);
wmsum_init(&dbuf_sums.hash_insert_race, 0);
wmsum_init(&dbuf_sums.metadata_cache_count, 0);
wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dbuf_ksp != NULL) {
for (int i = 0; i < DN_MAX_LEVELS; i++) {
snprintf(dbuf_stats.cache_levels[i].name,
KSTAT_STRLEN, "cache_level_%d", i);
dbuf_stats.cache_levels[i].data_type =
KSTAT_DATA_UINT64;
snprintf(dbuf_stats.cache_levels_bytes[i].name,
KSTAT_STRLEN, "cache_level_%d_bytes", i);
dbuf_stats.cache_levels_bytes[i].data_type =
KSTAT_DATA_UINT64;
}
dbuf_ksp->ks_data = &dbuf_stats;
dbuf_ksp->ks_update = dbuf_kstat_update;
kstat_install(dbuf_ksp);
}
}
void
dbuf_fini(void)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
dbuf_stats_destroy();
for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
mutex_destroy(&h->hash_mutexes[i]);
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
sizeof (kmutex_t));
kmem_cache_destroy(dbuf_kmem_cache);
kmem_cache_destroy(dbuf_dirty_kmem_cache);
taskq_destroy(dbu_evict_taskq);
mutex_enter(&dbuf_evict_lock);
dbuf_evict_thread_exit = B_TRUE;
while (dbuf_evict_thread_exit) {
cv_signal(&dbuf_evict_cv);
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
zfs_refcount_destroy(&dbuf_caches[dcs].size);
multilist_destroy(&dbuf_caches[dcs].cache);
}
if (dbuf_ksp != NULL) {
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
wmsum_fini(&dbuf_sums.cache_count);
wmsum_fini(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_fini(&dbuf_sums.cache_levels[i]);
wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
}
wmsum_fini(&dbuf_sums.hash_hits);
wmsum_fini(&dbuf_sums.hash_misses);
wmsum_fini(&dbuf_sums.hash_collisions);
wmsum_fini(&dbuf_sums.hash_elements);
wmsum_fini(&dbuf_sums.hash_chains);
wmsum_fini(&dbuf_sums.hash_insert_race);
wmsum_fini(&dbuf_sums.metadata_cache_count);
wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}
/*
* Other stuff.
*/
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
dnode_t *dn;
dbuf_dirty_record_t *dr;
uint32_t txg_prev;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
return;
ASSERT(db->db_objset != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
ASSERT(db->db_parent == NULL);
ASSERT(db->db_blkptr == NULL);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
ASSERT3U(db->db_level, <, dn->dn_nlevels);
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID ||
!avl_is_empty(&dn->dn_dbufs));
}
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dn != NULL);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
} else if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn != NULL);
ASSERT0(db->db.db_offset);
} else {
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
}
if ((dr = list_head(&db->db_dirty_records)) != NULL) {
ASSERT(dr->dr_dbuf == db);
txg_prev = dr->dr_txg;
for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
dr = list_next(&db->db_dirty_records, dr)) {
ASSERT(dr->dr_dbuf == db);
ASSERT(txg_prev > dr->dr_txg);
txg_prev = dr->dr_txg;
}
}
/*
* We can't assert that db_size matches dn_datablksz because it
* can be momentarily different when another thread is doing
* dnode_set_blksz().
*/
if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
dr = db->db_data_pending;
/*
* It should only be modified in syncing context, so
* make sure we only have one copy of the data.
*/
ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
}
/* verify db->db_blkptr */
if (db->db_blkptr) {
if (db->db_parent == dn->dn_dbuf) {
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
ASSERT(db->db_parent == NULL);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
} else {
/* db is pointed to by an indirect block */
int epb __maybe_unused = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
/*
* dnode_grow_indblksz() can make this fail if we don't
* have the parent's rwlock. XXX indblksz no longer
* grows. safe to do this now?
*/
if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
ASSERT3P(db->db_blkptr, ==,
((blkptr_t *)db->db_parent->db.db_data +
db->db_blkid % epb));
}
}
}
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
(db->db_buf == NULL || db->db_buf->b_data) &&
db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
/*
* If the blkptr isn't set but they have nonzero data,
* it had better be dirty, otherwise we'll lose that
* data when we evict this buffer.
*
* There is an exception to this rule for indirect blocks; in
* this case, if the indirect block is a hole, we fill in a few
* fields on each of the child blocks (importantly, birth time)
* to prevent hole birth times from being lost when you
* partially fill in a hole.
*/
if (db->db_dirtycnt == 0) {
if (db->db_level == 0) {
uint64_t *buf = db->db.db_data;
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
ASSERT(buf[i] == 0);
}
} else {
blkptr_t *bps = db->db.db_data;
ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
db->db.db_size);
/*
* We want to verify that all the blkptrs in the
* indirect block are holes, but we may have
* automatically set up a few fields for them.
* We iterate through each blkptr and verify
* they only have those fields set.
*/
for (int i = 0;
i < db->db.db_size / sizeof (blkptr_t);
i++) {
blkptr_t *bp = &bps[i];
ASSERT(ZIO_CHECKSUM_IS_ZERO(
&bp->blk_cksum));
ASSERT(
DVA_IS_EMPTY(&bp->blk_dva[0]) &&
DVA_IS_EMPTY(&bp->blk_dva[1]) &&
DVA_IS_EMPTY(&bp->blk_dva[2]));
ASSERT0(bp->blk_fill);
ASSERT0(bp->blk_pad[0]);
ASSERT0(bp->blk_pad[1]);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
}
}
}
}
DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
ASSERT3P(db->db_buf, ==, NULL);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "clear data");
}
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(buf != NULL);
db->db_buf = buf;
ASSERT(buf->b_data != NULL);
db->db.db_data = buf->b_data;
}
static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
spa_t *spa = db->db_objset->os_spa;
return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
/*
* Loan out an arc_buf for read. Return the loaned arc_buf.
*/
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
arc_buf_t *abuf;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
int blksz = db->db.db_size;
spa_t *spa = db->db_objset->os_spa;
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
memcpy(abuf->b_data, db->db.db_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
mutex_exit(&db->db_mtx);
}
return (abuf);
}
/*
* Calculate which level n block references the data at the level 0 offset
* provided.
*/
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
/*
* The level n blkid is equal to the level 0 blkid divided by
* the number of level 0s in a level n block.
*
* The level 0 blkid is offset >> datablkshift =
* offset / 2^datablkshift.
*
* The number of level 0s in a level n is the number of block
* pointers in an indirect block, raised to the power of level.
* This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
* 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
*
* Thus, the level n blkid is: offset /
* ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
* = offset / 2^(datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
* = offset >> (datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
*/
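/*
 * For instance (assumed values): with 128K data blocks (datablkshift = 17)
 * and 128K indirect blocks (indblkshift = 17, i.e. 1024 block pointers
 * each), the level 1 blkid for offset 1 GiB is 2^30 >> (17 + 1 * 10) = 8.
 */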
const unsigned exp = dn->dn_datablkshift +
level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
if (exp >= 8 * sizeof (offset)) {
/* This only happens on the highest indirection level */
ASSERT3U(level, ==, dn->dn_nlevels - 1);
return (0);
}
ASSERT3U(exp, <, 8 * sizeof (offset));
return (offset >> exp);
} else {
ASSERT3U(offset, <, dn->dn_datablksz);
return (0);
}
}
/*
* This function is used to lock the parent of the provided dbuf. This should be
* used when modifying or reading db_blkptr.
*/
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
enum db_lock_type ret = DLT_NONE;
if (db->db_parent != NULL) {
rw_enter(&db->db_parent->db_rwlock, rw);
ret = DLT_PARENT;
} else if (dmu_objset_ds(db->db_objset) != NULL) {
rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
tag);
ret = DLT_OBJSET;
}
/*
* We only return a DLT_NONE lock when it's the top-most indirect block
* of the meta-dnode of the MOS.
*/
return (ret);
}
/*
* We need to pass the lock type in because it's possible that the block will
* move from being the topmost indirect block in a dnode (and thus, have no
* parent) to not the top-most via an indirection increase. This would cause a
* panic if we didn't pass the lock type in.
*/
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
if (type == DLT_PARENT)
rw_exit(&db->db_parent->db_rwlock);
else if (type == DLT_OBJSET)
rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
(void) zb, (void) bp;
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
ASSERT3U(db->db_state, ==, DB_READ);
/*
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT3P(db->db_buf, ==, NULL);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
memset(buf->b_data, 0, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "freed in flight");
} else {
/* success */
ASSERT(zio == NULL || zio->io_error == 0);
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "successful read");
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL, B_FALSE);
}
/*
* Shortcut for performing reads on bonus dbufs. Returns
* an error if we fail to verify the dnode associated with
* a decrypted block. Otherwise success.
*/
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn)
{
int bonuslen, max_bonuslen;
bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(DB_DNODE_HELD(db));
ASSERT3U(bonuslen, <=, db->db.db_size);
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
memset(db->db.db_data, 0, max_bonuslen);
if (bonuslen)
memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
}
static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
{
blkptr_t *bps = db->db.db_data;
uint32_t indbs = 1ULL << dn->dn_indblkshift;
int n_bps = indbs >> SPA_BLKPTRSHIFT;
for (int i = 0; i < n_bps; i++) {
blkptr_t *bp = &bps[i];
ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
dn->dn_datablksz : BP_GET_LSIZE(dbbp));
BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
BP_SET_BIRTH(bp, BP_GET_LOGICAL_BIRTH(dbbp), 0);
}
}
/*
* Handle reads on dbufs that are holes, if necessary. This function
* requires that the dbuf's mutex is held. Returns success (0) if action
* was taken, ENOENT if no action was taken.
*/
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
int is_hole = bp == NULL || BP_IS_HOLE(bp);
/*
* For level 0 blocks only, if the above check fails:
* Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
* processes the delete record and clears the bp while we are waiting
* for the dn_mtx (resulting in a "no" from block_freed).
*/
if (!is_hole && db->db_level == 0)
is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
memset(db->db.db_data, 0, db->db.db_size);
if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
BP_GET_LOGICAL_BIRTH(bp) != 0) {
dbuf_handle_indirect_hole(db, dn, bp);
}
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "hole read satisfied");
return (0);
}
return (ENOENT);
}
/*
* This function ensures that, when doing a decrypting read of a block,
* we make sure we have decrypted the dnode associated with it. We must do
* this so that we ensure we are fully authenticating the checksum-of-MACs
* tree from the root of the objset down to this block. Indirect blocks are
* always verified against their secure checksum-of-MACs assuming that the
* dnode containing them is correct. Now that we are doing a decrypting read,
* we can be sure that the key is loaded and verify that assumption. This is
* especially important considering that we always read encrypted dnode
* blocks as raw data (without verifying their MACs) to start, and
* decrypt / authenticate them when we need to read an encrypted bonus buffer.
*/
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
objset_t *os = db->db_objset;
dmu_buf_impl_t *dndb;
arc_buf_t *dnbuf;
zbookmark_phys_t zb;
int err;
if ((flags & DB_RF_NO_DECRYPT) != 0 ||
!os->os_encrypted || os->os_raw_receive ||
(dndb = dn->dn_dbuf) == NULL)
return (0);
dnbuf = dndb->db_buf;
if (!arc_is_encrypted(dnbuf))
return (0);
mutex_enter(&dndb->db_mtx);
/*
* Since the dnode buffer is modified by the sync process, there can be
* only one copy of it, which means we cannot modify (decrypt) it while
* it is being written. I don't see how this may happen now, since
* encrypted dnode writes by receive should be completed before any
* plain-text reads due to the txg wait, but better safe than sorry.
*/
while (1) {
if (!arc_is_encrypted(dnbuf)) {
mutex_exit(&dndb->db_mtx);
return (0);
}
dbuf_dirty_record_t *dr = dndb->db_data_pending;
if (dr == NULL || dr->dt.dl.dr_data != dnbuf)
break;
cv_wait(&dndb->db_changed, &dndb->db_mtx);
}
SET_BOOKMARK(&zb, dmu_objset_id(os),
DMU_META_DNODE_OBJECT, 0, dndb->db_blkid);
err = arc_untransform(dnbuf, os->os_spa, &zb, B_TRUE);
/*
* An error code of EACCES tells us that the key is still not
* available. This is ok if we are only reading authenticated
* (and therefore non-encrypted) blocks.
*/
if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
(db->db_blkid == DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
err = 0;
mutex_exit(&dndb->db_mtx);
return (err);
}
/*
* Drops db_mtx and the parent lock specified by dblt and tag before
* returning.
*/
static int
dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, uint32_t flags,
db_lock_type_t dblt, blkptr_t *bp, const void *tag)
{
zbookmark_phys_t zb;
uint32_t aflags = ARC_FLAG_NOWAIT;
int err, zio_flags;
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_buf == NULL);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
if (db->db_blkid == DMU_BONUS_BLKID) {
err = dbuf_read_bonus(db, dn);
goto early_unlock;
}
err = dbuf_read_hole(db, dn, bp);
if (err == 0)
goto early_unlock;
ASSERT(bp != NULL);
/*
* Any attempt to read a redacted block should result in an error. This
* will never happen under normal conditions, but can be useful for
* debugging purposes.
*/
if (BP_IS_REDACTED(bp)) {
ASSERT(dsl_dataset_feature_is_active(
db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
err = SET_ERROR(EIO);
goto early_unlock;
}
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bp)) {
spa_log_error(db->db_objset->os_spa, &zb,
BP_GET_LOGICAL_BIRTH(bp));
err = SET_ERROR(EIO);
goto early_unlock;
}
db->db_state = DB_READ;
DTRACE_SET_STATE(db, "read issued");
mutex_exit(&db->db_mtx);
if (!DBUF_IS_CACHEABLE(db))
aflags |= ARC_FLAG_UNCACHED;
else if (dbuf_is_l2cacheable(db, bp))
aflags |= ARC_FLAG_L2CACHE;
dbuf_add_ref(db, NULL);
zio_flags = (flags & DB_RF_CANFAIL) ?
ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(bp))
zio_flags |= ZIO_FLAG_RAW;
/*
* The zio layer will copy the provided blkptr later, but we need a copy
* now so that we can release the parent's rwlock before issuing the
* read. Otherwise, if dbuf_read_done is called synchronously (on an L1
* cache hit), we would acquire the db_mtx while holding the parent's
* rwlock, which is a lock ordering violation.
*/
blkptr_t copy = *bp;
dmu_buf_unlock_parent(db, dblt, tag);
return (arc_read(zio, db->db_objset->os_spa, &copy,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
&aflags, &zb));
early_unlock:
mutex_exit(&db->db_mtx);
dmu_buf_unlock_parent(db, dblt, tag);
return (err);
}
/*
* This is our just-in-time copy function. It makes a copy of buffers that
* have been modified in a previous transaction group before we access them in
* the current active group.
*
* This function is used in three places: when we are dirtying a buffer for the
* first time in a txg, when we are freeing a range in a dnode that includes
* this buffer, and when we are accessing a buffer which was received compressed
* and later referenced in a WRITE_BYREF record.
*
* Note that when we are called from dbuf_free_range() we do not put a hold on
* the buffer, we just traverse the active dbuf list for the dnode.
*/
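/*
 * Illustrative scenario (txg numbers assumed): a buffer dirtied in
 * txg N is still referenced by that txg's dirty record while it syncs;
 * if the same buffer is dirtied again in txg N+2, the old dirty record
 * must be re-pointed at a private copy so that the new open-context
 * changes cannot leak into the data being written out for txg N.  The
 * ASSERT3U(dr->dr_txg, >=, txg - 2) below encodes that the last dirty
 * record can be at most two txgs behind the one being dirtied.
 */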
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
ASSERT(db->db_level == 0);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
(dr->dt.dl.dr_data !=
((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
return;
/*
* If the last dirty record for this dbuf has not yet synced
* and it's referencing the dbuf data, either:
* reset the reference to point to a new copy,
* or (if there are no active holders)
* just null out the current db_data pointer.
*/
ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
enum zio_compress compress_type =
arc_get_compression(db->db_buf);
uint8_t complevel = arc_get_complevel(db->db_buf);
if (arc_is_encrypted(db->db_buf)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(db->db_buf, &byteorder, salt,
iv, mac);
dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
compress_type, complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
size, arc_buf_lsize(db->db_buf), compress_type,
complevel);
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
}
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *pio, uint32_t flags)
{
dnode_t *dn;
boolean_t miss = B_TRUE, need_wait = B_FALSE, prefetch;
int err;
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Ensure that this block's dnode has been decrypted if the caller
* has requested decrypted data.
*/
err = dbuf_read_verify_dnode_crypt(db, dn, flags);
if (err != 0)
goto done;
prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
(flags & DB_RF_NOPREFETCH) == 0;
mutex_enter(&db->db_mtx);
if (flags & DB_RF_PARTIAL_FIRST)
db->db_partial_read = B_TRUE;
else if (!(flags & DB_RF_PARTIAL_MORE))
db->db_partial_read = B_FALSE;
miss = (db->db_state != DB_CACHED);
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/*
* Another reader came in while the dbuf was in flight between
* UNCACHED and CACHED. Either a writer will finish filling
* the buffer, sending the dbuf to CACHED, or the first reader's
* request will reach the read_done callback and send the dbuf
* to CACHED. Otherwise, a failure occurred and the dbuf will
* be sent to UNCACHED.
*/
if (flags & DB_RF_NEVERWAIT) {
mutex_exit(&db->db_mtx);
DB_DNODE_EXIT(db);
goto done;
}
do {
ASSERT(db->db_state == DB_READ ||
(flags & DB_RF_HAVESTRUCT) == 0);
DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, db,
zio_t *, pio);
cv_wait(&db->db_changed, &db->db_mtx);
} while (db->db_state == DB_READ || db->db_state == DB_FILL);
if (db->db_state == DB_UNCACHED) {
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
DB_DNODE_EXIT(db);
goto done;
}
}
if (db->db_state == DB_CACHED) {
/*
* If the arc buf is compressed or encrypted and the caller
* requested uncompressed data, we need to untransform it
* before returning. We also call arc_untransform() on any
* unauthenticated blocks, which will verify their MAC if
* the key is now available.
*/
if ((flags & DB_RF_NO_DECRYPT) == 0 && db->db_buf != NULL &&
(arc_is_encrypted(db->db_buf) ||
arc_is_unauthenticated(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
spa_t *spa = dn->dn_objset->os_spa;
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
blkptr_t *bp;
/*
* If a block clone or Direct I/O write has occurred, we will
* get the dirty record's overridden BP so that we read the
* most recent data.
*/
err = dmu_buf_get_bp_from_dbuf(db, &bp);
if (!err) {
if (pio == NULL && (db->db_state == DB_NOFILL ||
(bp != NULL && !BP_IS_HOLE(bp)))) {
spa_t *spa = dn->dn_objset->os_spa;
pio =
zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
need_wait = B_TRUE;
}
err =
dbuf_read_impl(db, dn, pio, flags, dblt, bp, FTAG);
} else {
mutex_exit(&db->db_mtx);
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/* dbuf_read_impl drops db_mtx and parent's rwlock. */
miss = (db->db_state != DB_CACHED);
}
if (err == 0 && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, miss,
flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
/*
* If we created a zio we must execute it to avoid leaking it, even if
* it isn't attached to any work due to an error in dbuf_read_impl().
*/
if (need_wait) {
if (err == 0)
err = zio_wait(pio);
else
(void) zio_wait(pio);
pio = NULL;
}
done:
if (miss)
DBUF_STAT_BUMP(hash_misses);
else
DBUF_STAT_BUMP(hash_hits);
if (pio && err != 0) {
zio_t *zio = zio_null(pio, pio->io_spa, NULL, NULL, NULL,
ZIO_FLAG_CANFAIL);
zio->io_error = err;
zio_nowait(zio);
}
return (err);
}
static void
dbuf_noread(dmu_buf_impl_t *db)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
} else if (db->db_state == DB_NOFILL) {
dbuf_clear_data(db);
} else {
ASSERT3U(db->db_state, ==, DB_CACHED);
}
mutex_exit(&db->db_mtx);
}
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
uint64_t txg = dr->dr_txg;
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* This assert is valid because dmu_sync() expects to be called by
* a zilog's get_data while holding a range lock. This call only
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
ASSERT(db->db_level == 0);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
return;
ASSERT(db->db_data_pending != dr);
/* free this block */
if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
zio_free(db->db_objset->os_spa, txg, bp);
if (dr->dt.dl.dr_brtwrite || dr->dt.dl.dr_diowrite) {
ASSERT0P(dr->dt.dl.dr_data);
dr->dt.dl.dr_data = db->db_buf;
}
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
dr->dt.dl.dr_nopwrite = B_FALSE;
dr->dt.dl.dr_brtwrite = B_FALSE;
dr->dt.dl.dr_diowrite = B_FALSE;
dr->dt.dl.dr_has_raw_params = B_FALSE;
/*
* In the event that Direct I/O was used, we do not
* need to release the buffer from the ARC.
*
* Release the already-written buffer, so we leave it in
* a consistent dirty state. Note that all callers are
* modifying the buffer, so they will immediately do
* another (redundant) arc_release(). Therefore, leave
* the buf thawed to save the effort of freezing &
* immediately re-thawing it.
*/
if (dr->dt.dl.dr_data)
arc_release(dr->dt.dl.dr_data, db);
}
/*
* Evict (if it's unreferenced) or clear (if it's referenced) any level-0
* data blocks in the free range, so that any future readers will find
* empty blocks.
*/
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db, *db_next;
uint64_t txg = tx->tx_txg;
avl_index_t where;
dbuf_dirty_record_t *dr;
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
(u_longlong_t)end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
db_search->db_blkid = start_blkid;
db_search->db_state = DB_SEARCH;
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
ASSERT3P(db, ==, NULL);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = db_next) {
db_next = AVL_NEXT(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
if (db->db_level != 0 || db->db_blkid > end_blkid) {
break;
}
ASSERT3U(db->db_blkid, >=, start_blkid);
/* found a level 0 buffer in the range */
mutex_enter(&db->db_mtx);
if (dbuf_undirty(db, tx)) {
/* mutex has been dropped and dbuf destroyed */
continue;
}
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
ASSERT(db->db.db_data == NULL);
mutex_exit(&db->db_mtx);
continue;
}
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/* will be handled in dbuf_read_done or dbuf_rele */
db->db_freed_in_flight = TRUE;
mutex_exit(&db->db_mtx);
continue;
}
if (zfs_refcount_count(&db->db_holds) == 0) {
ASSERT(db->db_buf);
dbuf_destroy(db);
continue;
}
/* The dbuf is referenced */
dr = list_head(&db->db_dirty_records);
if (dr != NULL) {
if (dr->dr_txg == txg) {
/*
* This buffer is "in-use", re-adjust the file
* size to reflect that this buffer may
* contain new data when we sync.
*/
if (db->db_blkid != DMU_SPILL_BLKID &&
db->db_blkid > dn->dn_maxblkid)
dn->dn_maxblkid = db->db_blkid;
dbuf_unoverride(dr);
} else {
/*
* This dbuf is not dirty in the open context.
* Either uncache it (if it's not referenced in
* the open context) or reset its contents to
* empty.
*/
dbuf_fix_old_data(db, txg);
}
}
/* clear the contents if it's cached */
if (db->db_state == DB_CACHED) {
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
mutex_exit(&db->db_mtx);
}
mutex_exit(&dn->dn_dbufs_mtx);
kmem_free(db_search, sizeof (dmu_buf_impl_t));
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
arc_buf_t *buf, *old_buf;
dbuf_dirty_record_t *dr;
int osize = db->db.db_size;
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dnode_t *dn;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* XXX we should be doing a dbuf_read, checking the return
* value and returning that up to our callers
*/
dmu_buf_will_dirty(&db->db, tx);
VERIFY3P(db->db_buf, !=, NULL);
/* create the data buffer for the new block */
buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
/* copy old block data to the new block */
old_buf = db->db_buf;
memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
memset((uint8_t *)buf->b_data + osize, 0, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
arc_buf_destroy(old_buf, db);
db->db.db_size = size;
dr = list_head(&db->db_dirty_records);
/* dirty record added by dmu_buf_will_dirty() */
VERIFY(dr != NULL);
if (db->db_level == 0)
dr->dt.dl.dr_data = buf;
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
ASSERT3U(dr->dr_accounted, ==, osize);
dr->dr_accounted = size;
mutex_exit(&db->db_mtx);
dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
objset_t *os __maybe_unused = db->db_objset;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(arc_released(os->os_phys_buf) ||
list_link_active(&os->os_dsl_dataset->ds_synced_link));
ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
(void) arc_release(db->db_buf, db);
}
/*
* We already have a dirty record for this TXG, and we are being
* dirtied again.
*/
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
/*
* If this buffer has already been written out,
* we now need to reset its state.
*/
dbuf_unoverride(dr);
if (db->db.db_object != DMU_META_DNODE_OBJECT &&
db->db_state != DB_NOFILL) {
/* Already released on initial dirty, so just thaw. */
ASSERT(arc_released(db->db_buf));
arc_buf_thaw(db->db_buf);
}
}
}
dbuf_dirty_record_t *
dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
ASSERT(dn->dn_maxblkid >= blkid);
dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
dr->dr_txg = tx->tx_txg;
dr->dt.dll.dr_blkid = blkid;
dr->dr_accounted = dn->dn_datablksz;
/*
* There should not be any dbuf for the block that we're dirtying.
* Otherwise the buffer contents could be inconsistent between the
* dbuf and the lightweight dirty record.
*/
ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
NULL));
mutex_enter(&dn->dn_mtx);
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] != NULL) {
- range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
+ zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
}
if (dn->dn_nlevels == 1) {
ASSERT3U(blkid, <, dn->dn_nblkptr);
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
dnode_setdirty(dn, tx);
} else {
mutex_exit(&dn->dn_mtx);
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
1, blkid >> epbs, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (parent_db == NULL) {
kmem_free(dr, sizeof (*dr));
return (NULL);
}
int err = dbuf_read(parent_db, NULL,
(DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err != 0) {
dbuf_rele(parent_db, FTAG);
kmem_free(dr, sizeof (*dr));
return (NULL);
}
dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
dbuf_rele(parent_db, FTAG);
mutex_enter(&parent_dr->dt.di.dr_mtx);
ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
list_insert_tail(&parent_dr->dt.di.dr_children, dr);
mutex_exit(&parent_dr->dt.di.dr_mtx);
dr->dr_parent = parent_dr;
}
dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
return (dr);
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
objset_t *os;
dbuf_dirty_record_t *dr, *dr_next, *dr_head;
int txgoff = tx->tx_txg & TXG_MASK;
boolean_t drop_struct_rwlock = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DMU_TX_DIRTY_BUF(tx, db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Shouldn't dirty a regular buffer in syncing context. Private
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
ASSERT(!dmu_tx_is_syncing(tx) ||
BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
dn->dn_objset->os_dsl_dataset == NULL);
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
/*
* We make this assert for private objects as well, but after we
* check if we're already dirty. They are allowed to re-dirty
* in syncing context.
*/
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
* XXX make this true for indirects too? The problem is that
* transactions created with dmu_tx_create_assigned() from
* syncing context don't bother holding ahead.
*/
ASSERT(db->db_level != 0 ||
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
mutex_enter(&dn->dn_mtx);
dnode_set_dirtyctx(dn, tx, db);
if (tx->tx_txg > dn->dn_dirty_txg)
dn->dn_dirty_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
/*
* If this buffer is already dirty, we're done.
*/
dr_head = list_head(&db->db_dirty_records);
ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
db->db.db_object == DMU_META_DNODE_OBJECT);
dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
if (dr_next && dr_next->dr_txg == tx->tx_txg) {
DB_DNODE_EXIT(db);
dbuf_redirty(dr_next);
mutex_exit(&db->db_mtx);
return (dr_next);
}
/*
* Only valid if not already dirty.
*/
ASSERT(dn->dn_object == 0 ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
* We should only be dirtying in syncing context if it's the
* mos or we're initializing the os or it's a special object.
* However, we are allowed to dirty in syncing context provided
* we already dirtied it in open context. Hence we must make
* this assertion only if we're not already dirty.
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
ASSERT(db->db.db_size != 0);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
dmu_objset_willuse_space(os, db->db.db_size, tx);
}
/*
* If this buffer is dirty in an old transaction group we need
* to make a copy of it so that the changes we make in this
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_cache_alloc(dbuf_dirty_kmem_cache, KM_SLEEP);
memset(dr, 0, sizeof (*dr));
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
if (db->db_level == 0) {
void *data_old = db->db_buf;
if (db->db_state != DB_NOFILL) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db.db_data;
} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
/*
* Release the data buffer from the cache so
* that we can modify it without impacting
* possible other users of this cached data
* block. Note that indirect blocks and
* private objects are not released until the
* syncing state (since they are only modified
* then).
*/
arc_release(db->db_buf, db);
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db_buf;
}
ASSERT(data_old != NULL);
}
dr->dt.dl.dr_data = data_old;
} else {
mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
list_create(&dr->dt.di.dr_children,
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
dr->dr_accounted = db->db.db_size;
}
dr->dr_dbuf = db;
dr->dr_txg = tx->tx_txg;
list_insert_before(&db->db_dirty_records, dr_next, dr);
/*
* We could have been freed_in_flight between the dbuf_noread
* and dbuf_dirty. We win, as though the dbuf_noread() had
* happened after the free.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_free_ranges[txgoff] != NULL) {
- range_tree_clear(dn->dn_free_ranges[txgoff],
+ zfs_range_tree_clear(dn->dn_free_ranges[txgoff],
db->db_blkid, 1);
}
mutex_exit(&dn->dn_mtx);
db->db_freed_in_flight = FALSE;
}
/*
* This buffer is now part of this txg
*/
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
db->db_dirtycnt += 1;
ASSERT3U(db->db_dirtycnt, <=, 3);
mutex_exit(&db->db_mtx);
if (db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_rwlock = B_TRUE;
}
/*
* If we are overwriting a dedup BP, then unless it is snapshotted,
* when we get to syncing context we will need to decrement its
* refcount in the DDT. Prefetch the relevant DDT block so that
* syncing context won't have to wait for the i/o.
*/
if (db->db_blkptr != NULL) {
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
ddt_prefetch(os->os_spa, db->db_blkptr);
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* We need to hold the dn_struct_rwlock to make this assertion,
* because it protects dn_phys / dn_next_nlevels from changing.
*/
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
dn->dn_phys->dn_nlevels > db->db_level ||
dn->dn_next_nlevels[txgoff] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
if (db->db_level == 0) {
ASSERT(!db->db_objset->os_raw_receive ||
dn->dn_maxblkid >= db->db_blkid);
dnode_new_blkid(dn, db->db_blkid, tx,
drop_struct_rwlock, B_FALSE);
ASSERT(dn->dn_maxblkid >= db->db_blkid);
}
if (db->db_level+1 < dn->dn_nlevels) {
dmu_buf_impl_t *parent = db->db_parent;
dbuf_dirty_record_t *di;
int parent_held = FALSE;
if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, FTAG);
ASSERT(parent != NULL);
parent_held = TRUE;
}
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
ASSERT3U(db->db_level + 1, ==, parent->db_level);
di = dbuf_dirty(parent, tx);
if (parent_held)
dbuf_rele(parent, FTAG);
mutex_enter(&db->db_mtx);
/*
* Since we've dropped the mutex, it's possible that
* dbuf_undirty() might have changed this out from under us.
*/
if (list_head(&db->db_dirty_records) == dr ||
dn->dn_object == DMU_META_DNODE_OBJECT) {
mutex_enter(&di->dt.di.dr_mtx);
ASSERT3U(di->dr_txg, ==, tx->tx_txg);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&di->dt.di.dr_children, dr);
mutex_exit(&di->dt.di.dr_mtx);
dr->dr_parent = di;
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_level + 1 == dn->dn_nlevels);
ASSERT(db->db_blkid < dn->dn_nblkptr);
ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
}
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
static void
dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
if (dr->dt.dl.dr_data != db->db.db_data) {
struct dnode *dn = dr->dr_dnode;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
kmem_free(dr->dt.dl.dr_data, max_bonuslen);
arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
}
db->db_data_pending = NULL;
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
if (dr->dr_dbuf->db_level != 0) {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_cache_free(dbuf_dirty_kmem_cache, dr);
ASSERT3U(db->db_dirtycnt, >, 0);
db->db_dirtycnt -= 1;
}
/*
* Undirty a buffer in the transaction group referenced by the given
* transaction. Return whether this evicted the dbuf.
*/
boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
uint64_t txg = tx->tx_txg;
boolean_t brtwrite;
boolean_t diowrite;
ASSERT(txg != 0);
/*
* Due to our use of dn_nlevels below, this can only be called
* in open context, unless we are operating on the MOS.
* From syncing context, dn_nlevels may be different from the
* dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* If this buffer is not dirty, we're done.
*/
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL)
return (B_FALSE);
ASSERT(dr->dr_dbuf == db);
brtwrite = dr->dt.dl.dr_brtwrite;
diowrite = dr->dt.dl.dr_diowrite;
if (brtwrite) {
ASSERT3B(diowrite, ==, B_FALSE);
/*
* We are freeing a block that we cloned in the same
* transaction group.
*/
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
brt_pending_remove(dmu_objset_spa(db->db_objset),
bp, tx);
}
}
dnode_t *dn = dr->dr_dnode;
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
ASSERT(db->db.db_size != 0);
dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
dr->dr_accounted, txg);
list_remove(&db->db_dirty_records, dr);
/*
* Note that there are three places in dbuf_dirty()
* where this dirty record may be put on a list.
* Make sure to do a list_remove corresponding to
* every one of those list_insert calls.
*/
if (dr->dr_parent) {
mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
list_remove(&dr->dr_parent->dt.di.dr_children, dr);
mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
} else if (db->db_blkid == DMU_SPILL_BLKID ||
db->db_level + 1 == dn->dn_nlevels) {
ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
mutex_exit(&dn->dn_mtx);
}
if (db->db_state != DB_NOFILL && !brtwrite) {
dbuf_unoverride(dr);
if (dr->dt.dl.dr_data != db->db_buf) {
ASSERT(db->db_buf != NULL);
ASSERT(dr->dt.dl.dr_data != NULL);
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
}
kmem_cache_free(dbuf_dirty_kmem_cache, dr);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
ASSERT(db->db_state == DB_NOFILL || brtwrite || diowrite ||
arc_released(db->db_buf));
dbuf_destroy(db);
return (B_TRUE);
}
return (B_FALSE);
}
static void
dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
boolean_t undirty = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
/*
* Quick check for dirtiness to improve performance for some workloads
* (e.g. file deletion with indirect blocks cached).
*/
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
/*
* It's possible that the dbuf is already dirty but not cached,
* because there are some calls to dbuf_dirty() that don't
* go through dmu_buf_will_dirty().
*/
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
if (dr != NULL) {
if (db->db_level == 0 &&
dr->dt.dl.dr_brtwrite) {
/*
* Block cloning: If we are dirtying a cloned
* level 0 block, we cannot simply redirty it,
* because this dr has no associated data.
* We will go through a full undirtying below,
* before dirtying it again.
*/
undirty = B_TRUE;
} else {
/* This dbuf is already dirty and cached. */
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return;
}
}
}
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
flags |= DB_RF_HAVESTRUCT;
DB_DNODE_EXIT(db);
/*
* Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
* want to make sure dbuf_read() will read the pending cloned block and
* not the underlying block that is being replaced. dbuf_undirty() will
* do brt_pending_remove() before removing the dirty record.
*/
(void) dbuf_read(db, NULL, flags);
if (undirty) {
mutex_enter(&db->db_mtx);
VERIFY(!dbuf_undirty(db, tx));
mutex_exit(&db->db_mtx);
}
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
}
boolean_t
dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
mutex_enter(&db->db_mtx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
mutex_exit(&db->db_mtx);
return (dr != NULL);
}
/*
* Normally the db_blkptr points to the most recent on-disk content for the
* dbuf (and anything newer will be cached in the dbuf). However, a pending
* block clone or not yet synced Direct I/O write will have a dirty record BP
* pointing to the most recent data.
*/
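/*
 * For example, dbuf_read() above holds db_mtx and calls this function to
 * decide what to read; for a cloned (DB_NOFILL) or not yet synced
 * Direct I/O (DB_UNCACHED) dbuf, *bp is pointed at the dirty record's
 * dr_overridden_by instead of at db_blkptr.
 */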
int
dmu_buf_get_bp_from_dbuf(dmu_buf_impl_t *db, blkptr_t **bp)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
int error = 0;
if (db->db_level != 0) {
*bp = db->db_blkptr;
return (0);
}
*bp = db->db_blkptr;
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
if (dr && db->db_state == DB_NOFILL) {
/* Block clone */
if (!dr->dt.dl.dr_brtwrite)
error = EIO;
else
*bp = &dr->dt.dl.dr_overridden_by;
} else if (dr && db->db_state == DB_UNCACHED) {
/* Direct I/O write */
if (dr->dt.dl.dr_diowrite)
*bp = &dr->dt.dl.dr_overridden_by;
}
return (error);
}
/*
* Direct I/O reads can read directly from the ARC, but the data has
* to be untransformed in order to copy it over into user pages.
*/
int
dmu_buf_untransform_direct(dmu_buf_impl_t *db, spa_t *spa)
{
int err = 0;
DB_DNODE_ENTER(db);
dnode_t *dn = DB_DNODE(db);
ASSERT3S(db->db_state, ==, DB_CACHED);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* Ensure that this block's dnode has been decrypted if
* the caller has requested decrypted data.
*/
err = dbuf_read_verify_dnode_crypt(db, dn, 0);
/*
* If the arc buf is compressed or encrypted and the caller
* requested uncompressed data, we need to untransform it
* before returning. We also call arc_untransform() on any
* unauthenticated blocks, which will verify their MAC if
* the key is now available.
*/
if (err == 0 && db->db_buf != NULL &&
(arc_is_encrypted(db->db_buf) ||
arc_is_unauthenticated(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_hits);
return (err);
}
void
dmu_buf_will_clone_or_dio(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
/*
* Block clones and Direct I/O writes always happen in open-context.
*/
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT0(db->db_level);
ASSERT(!dmu_tx_is_syncing(tx));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
/*
* We are going to clone or issue a Direct I/O write on this block, so
* undirty modifications done to this block so far in this txg. This
* includes writes and clones into this block.
*
* If there is a dirty record associated with this txg from a previous
* Direct I/O write, then space accounting cleanup takes place. It is
* important to go ahead and free up the space accounting through
* dbuf_undirty() -> dbuf_unoverride() -> zio_free(). Space accounting
* for determining if a write can occur in zfs_write() happens through
* dmu_tx_assign(). This can cause an issue with Direct I/O writes in
* the case of overwriting the same block, because all DVA allocations
* are being done in open-context. Constantly allowing Direct I/O
* overwrites to the same block can exhaust the pool's available space,
* leading to ENOSPC errors at the DVA allocation part of the ZIO
* pipeline, which will eventually suspend the pool. By cleaning up
* space accounting now, the ENOSPC error can be avoided.
*
* Since we are undirtying the record in open-context, we must have a
* hold on the db, so it should never be evicted after calling
* dbuf_undirty().
*/
VERIFY3B(dbuf_undirty(db, tx), ==, B_FALSE);
ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
if (db->db_buf != NULL) {
/*
* If there is an associated ARC buffer with this dbuf we can
* only destroy it if the previous dirty record does not
* reference it.
*/
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
if (dr == NULL || dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(db->db_buf, db);
/*
* Setting the dbuf's data pointers to NULL will force all
* future reads down to the devices to get the most up to date
* version of the data after a Direct I/O write has completed.
*/
db->db_buf = NULL;
dbuf_clear_data(db);
}
ASSERT3P(db->db_buf, ==, NULL);
ASSERT3P(db->db.db_data, ==, NULL);
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db,
"allocating NOFILL buffer for clone or direct I/O write");
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db, "allocating NOFILL buffer");
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
ASSERT(db->db_level == 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
dmu_tx_private_ok(tx));
mutex_enter(&db->db_mtx);
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
if (db->db_state == DB_NOFILL ||
(db->db_state == DB_UNCACHED && dr && dr->dt.dl.dr_diowrite)) {
/*
* If the fill can fail, we should have a way to return to the
* cloned or Direct I/O write data.
*/
if (canfail && dr) {
mutex_exit(&db->db_mtx);
dmu_buf_will_dirty(db_fake, tx);
return;
}
/*
* Block cloning: We will be completely overwriting a block
* cloned in this transaction group, so let's undirty the
* pending clone and mark the block as uncached. This will be
* as if the clone was never done.
*/
if (db->db_state == DB_NOFILL) {
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
}
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
/*
* This function is effectively the same as dmu_buf_will_dirty(), but
* indicates the caller expects raw encrypted data in the db, and provides
* the crypt params (byteorder, salt, iv, mac) which should be stored in the
* blkptr_t when this dbuf is written. This is only used for blocks of
* dnodes, during raw receive.
*/
void
dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
/*
* dr_has_raw_params is only processed for blocks of dnodes
* (see dbuf_sync_dnode_leaf_crypt()).
*/
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT0(db->db_level);
ASSERT(db->db_objset->os_raw_receive);
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dt.dl.dr_override_state, ==, DR_NOT_OVERRIDDEN);
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
}
static void
dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
{
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
ASSERT3U(db->db.db_object, !=, DMU_META_DNODE_OBJECT);
ASSERT0(db->db_level);
dr = list_head(&db->db_dirty_records);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
ASSERT0(dl->dr_has_raw_params);
dl->dr_overridden_by = *bp;
dl->dr_override_state = DR_OVERRIDDEN;
BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
}
boolean_t
dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
{
(void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
if (db->db_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
failed = B_FALSE;
} else if (failed) {
VERIFY(!dbuf_undirty(db, tx));
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
DTRACE_SET_STATE(db, "fill failed");
} else {
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "fill done");
}
cv_broadcast(&db->db_changed);
} else {
db->db_state = DB_CACHED;
failed = B_FALSE;
}
mutex_exit(&db->db_mtx);
return (failed);
}
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
struct dirty_leaf *dl;
dmu_object_type_t type;
dbuf_dirty_record_t *dr;
if (etype == BP_EMBEDDED_TYPE_DATA) {
ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
SPA_FEATURE_EMBEDDED_DATA));
}
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
dmu_buf_will_not_fill(dbuf, tx);
dr = list_head(&db->db_dirty_records);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
ASSERT0(dl->dr_has_raw_params);
encode_embedded_bp_compressed(&dl->dr_overridden_by,
data, comp, uncompressed_size, compressed_size);
BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
BP_SET_TYPE(&dl->dr_overridden_by, type);
BP_SET_LEVEL(&dl->dr_overridden_by, 0);
BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
dl->dr_override_state = DR_OVERRIDDEN;
BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
}
void
dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dmu_object_type_t type;
ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
dmu_buf_will_not_fill(dbuf, tx);
blkptr_t bp = { { { {0} } } };
BP_SET_TYPE(&bp, type);
BP_SET_LEVEL(&bp, 0);
BP_SET_BIRTH(&bp, tx->tx_txg, 0);
BP_SET_REDACTED(&bp);
BPE_SET_LSIZE(&bp, dbuf->db_size);
dbuf_override_impl(db, &bp, tx);
}
/*
* Directly assign a provided arc buf to a given dbuf if it's not referenced
* by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
*/
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_level == 0);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
ASSERT(tx->tx_txg != 0);
arc_return_buf(buf, db);
ASSERT(arc_released(buf));
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
if (db->db_state == DB_CACHED &&
zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
/*
* In practice, we will never have a case where we have an
* encrypted arc buffer while additional holds exist on the
* dbuf. We don't handle this here so we simply assert that
* fact instead.
*/
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
memcpy(db->db.db_data, buf->b_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
arc_release(db->db_buf, db);
}
dr->dt.dl.dr_data = buf;
arc_buf_destroy(db->db_buf, db);
} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
arc_release(db->db_buf, db);
arc_buf_destroy(db->db_buf, db);
}
db->db_buf = NULL;
} else if (db->db_state == DB_NOFILL) {
/*
* We will be completely replacing the cloned block. In case
* it was cloned in this transaction group, let's undirty the
* pending clone and mark the block as uncached. This will be
* as if the clone was never done.
*/
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
ASSERT(db->db_buf == NULL);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
dmu_buf_fill_done(&db->db, tx, B_FALSE);
}
void
dbuf_destroy(dmu_buf_impl_t *db)
{
dnode_t *dn;
dmu_buf_impl_t *parent = db->db_parent;
dmu_buf_impl_t *dndb;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(zfs_refcount_is_zero(&db->db_holds));
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
if (db->db_blkid == DMU_BONUS_BLKID) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
if (db->db.db_data != NULL) {
kmem_free(db->db.db_data, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "buffer cleared");
}
}
dbuf_clear_data(db);
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
ASSERT0(dmu_buf_user_size(&db->db));
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_data_pending == NULL);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
DTRACE_SET_STATE(db, "buffer eviction started");
db->db_blkptr = NULL;
/*
* Now that db_state is DB_EVICTING, nobody else can find this via
* the hash table. We can now drop db_mtx, which allows us to
* acquire the dn_dbufs_mtx.
*/
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dndb = dn->dn_dbuf;
if (db->db_blkid != DMU_BONUS_BLKID) {
boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
if (needlock)
mutex_enter_nested(&dn->dn_dbufs_mtx,
NESTED_SINGLE);
avl_remove(&dn->dn_dbufs, db);
membar_producer();
DB_DNODE_EXIT(db);
if (needlock)
mutex_exit(&dn->dn_dbufs_mtx);
/*
* Decrementing the dbuf count means that the hold corresponding
* to the removed dbuf is no longer discounted in dnode_move(),
* so the dnode cannot be moved until after we release the hold.
* The membar_producer() ensures visibility of the decremented
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, db, B_TRUE);
#ifdef USE_DNODE_HANDLE
db->db_dnode_handle = NULL;
#else
db->db_dnode = NULL;
#endif
dbuf_hash_remove(db);
} else {
DB_DNODE_EXIT(db);
}
ASSERT(zfs_refcount_is_zero(&db->db_holds));
db->db_parent = NULL;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
ASSERT(db->db_hash_next == NULL);
ASSERT(db->db_blkptr == NULL);
ASSERT(db->db_data_pending == NULL);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
/*
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb) {
mutex_enter(&parent->db_mtx);
dbuf_rele_and_unlock(parent, db, B_TRUE);
}
kmem_cache_free(dbuf_kmem_cache, db);
arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
}
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
* this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
*parentp = NULL;
*bpp = NULL;
ASSERT(blkid != DMU_BONUS_BLKID);
if (blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_have_spill &&
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
else
*bpp = NULL;
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
mutex_exit(&dn->dn_mtx);
return (0);
}
int nlevels =
(dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(level * epbs, <, 64);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
/*
* This assertion shouldn't trip as long as the max indirect block size
* is less than 1M. The reason for this is that up to that point,
* the number of levels required to address an entire object with blocks
* of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
* other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
* (i.e. we can address the entire object), objects will all use at most
* N-1 levels and the assertion won't overflow. However, once epbs is
* 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
* enough to address an entire object, so objects will have 5 levels,
* but then this assertion will overflow.
*
* All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
* need to redo this logic to handle overflows.
*/
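/*
 * Worked example (values assumed for illustration): with 128K indirect
 * blocks, epbs = 17 - 7 = 10.  For nlevels = 6, level = 0 and
 * dn_nblkptr = 3, the assertion below evaluates to
 * (6 - 0 - 1) * 10 + highbit64(3) = 50 + 2 = 52 <= 64, so the shift in
 * the blkid bound check that follows cannot overflow 64 bits.
 */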
ASSERT(level >= nlevels ||
((nlevels - level - 1) * epbs) +
highbit64(dn->dn_phys->dn_nblkptr) <= 64);
if (level >= nlevels ||
blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
((nlevels - level - 1) * epbs)) ||
(fail_sparse &&
blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
err = dbuf_hold_impl(dn, level + 1,
blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
if (err)
return (err);
err = dbuf_read(*parentp, NULL,
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err) {
dbuf_rele(*parentp, NULL);
*parentp = NULL;
return (err);
}
rw_enter(&(*parentp)->db_rwlock, RW_READER);
*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
(blkid & ((1ULL << epbs) - 1));
if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
ASSERT(BP_IS_HOLE(*bpp));
rw_exit(&(*parentp)->db_rwlock);
return (0);
} else {
/* the block is referenced from the dnode */
ASSERT3U(level, ==, nlevels-1);
ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
blkid < dn->dn_phys->dn_nblkptr);
if (dn->dn_dbuf) {
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
}
*bpp = &dn->dn_phys->dn_blkptr[blkid];
return (0);
}
}
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
{
objset_t *os = dn->dn_objset;
dmu_buf_impl_t *db, *odb;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);
db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dbuf_node));
db->db_objset = os;
db->db.db_object = dn->dn_object;
db->db_level = level;
db->db_blkid = blkid;
db->db_dirtycnt = 0;
#ifdef USE_DNODE_HANDLE
db->db_dnode_handle = dn->dn_handle;
#else
db->db_dnode = dn;
#endif
db->db_parent = parent;
db->db_blkptr = blkptr;
db->db_hash = hash;
db->db_user = NULL;
db->db_user_immediate_evict = FALSE;
db->db_freed_in_flight = FALSE;
db->db_pending_evict = FALSE;
if (blkid == DMU_BONUS_BLKID) {
ASSERT3P(parent, ==, dn->dn_dbuf);
db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
db->db.db_offset = DMU_BONUS_BLKID;
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "bonus buffer created");
db->db_caching_status = DB_NO_CACHE;
/* the bonus dbuf is not placed in the hash table */
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
return (db);
} else if (blkid == DMU_SPILL_BLKID) {
db->db.db_size = (blkptr != NULL) ?
BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
db->db.db_offset = 0;
} else {
int blocksize =
db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
db->db.db_size = blocksize;
db->db.db_offset = db->db_blkid * blocksize;
}
/*
* Hold the dn_dbufs_mtx while we insert the new dbuf
* into the hash table *and* add it to the dbufs list.
* This prevents a possible deadlock with someone
* trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
db->db_state = DB_EVICTING; /* not worth logging this state change */
if ((odb = dbuf_hash_insert(db)) != NULL) {
/* someone else inserted it first */
mutex_exit(&dn->dn_dbufs_mtx);
kmem_cache_free(dbuf_kmem_cache, db);
DBUF_STAT_BUMP(hash_insert_race);
return (odb);
}
avl_add(&dn->dn_dbufs, db);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "regular buffer created");
db->db_caching_status = DB_NO_CACHE;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
if (parent && parent != dn->dn_dbuf)
dbuf_add_ref(parent, db);
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
zfs_refcount_count(&dn->dn_holds) > 0);
(void) zfs_refcount_add(&dn->dn_holds, db);
dprintf_dbuf(db, "db=%p\n", db);
return (db);
}
/*
* This function returns a block pointer and information about the object,
* given a dnode and a block. This is a publicly accessible version of
* dbuf_findbp that only returns some information, rather than the
* dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
* should be locked as (at least) a reader.
*/
int
dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
{
dmu_buf_impl_t *dbp = NULL;
blkptr_t *bp2;
int err = 0;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
if (err == 0) {
ASSERT3P(bp2, !=, NULL);
*bp = *bp2;
if (dbp != NULL)
dbuf_rele(dbp, NULL);
if (datablkszsec != NULL)
*datablkszsec = dn->dn_phys->dn_datablkszsec;
if (indblkshift != NULL)
*indblkshift = dn->dn_phys->dn_indblkshift;
}
return (err);
}
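/* State passed through the chain of asynchronous prefetch callbacks. */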
typedef struct dbuf_prefetch_arg {
spa_t *dpa_spa; /* The spa to issue the prefetch in. */
zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
int dpa_curlevel; /* The current level that we're reading */
dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
void *dpa_arg; /* prefetch completion arg */
} dbuf_prefetch_arg_t;
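/* Invoke the prefetch completion callback, if any, and free the state. */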
static void
dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
{
if (dpa->dpa_cb != NULL) {
dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
dpa->dpa_zb.zb_blkid, io_done);
}
kmem_free(dpa, sizeof (*dpa));
}
static void
dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zio, (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
if (abuf != NULL)
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
}
/*
* Actually issue the prefetch read for the block given.
*/
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (dbuf_prefetch_fini(dpa, B_FALSE));
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags =
dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
ARC_FLAG_NO_BUF;
/* dnodes are always read as raw and then converted later */
if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
dpa->dpa_curlevel == 0)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
ASSERT(dpa->dpa_zio != NULL);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
dbuf_issue_final_prefetch_done, dpa,
dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
}
/*
* Called when an indirect block above our prefetch target is read in. This
* will either read in the next indirect block down the tree or issue the actual
* prefetch if the next block down is our target.
*/
static void
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
ASSERT3S(dpa->dpa_curlevel, >, 0);
if (abuf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
}
ASSERT(zio == NULL || zio->io_error == 0);
/*
* The dpa_dnode is only valid if we are called with a NULL
* zio. This indicates that the arc_read() returned without
* first calling zio_read() to issue a physical read. Once
* a physical read is made the dpa_dnode must be invalidated
* as the locks guarding it may have been dropped. If the
* dpa_dnode is still valid, then we want to add it to the dbuf
* cache. To do so, we must hold the dbuf associated with the block
* we just prefetched, read its contents so that we associate it
* with an arc_buf_t, and then release it.
*/
if (zio != NULL) {
ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
} else {
ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
}
ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
dpa->dpa_dnode = NULL;
} else if (dpa->dpa_dnode != NULL) {
uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel -
dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG);
if (db == NULL) {
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
}
(void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG);
}
dpa->dpa_curlevel--;
uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS)));
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
dbuf_issue_final_prefetch(dpa, bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
arc_buf_destroy(abuf, private);
}
/*
* Issue prefetch reads for the given block on the given level. If the indirect
* blocks above that block are not in memory, we will read them in
* asynchronously. As a result, this call never blocks waiting for a read to
* complete. Note that the prefetch might fail if the dataset is encrypted and
* the encryption key is unmapped before the IO completes.
*/
int
dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg)
{
blkptr_t bp;
int epbs, nlevels, curlevel;
uint64_t curblkid;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
goto no_issue;
if (level == 0 && dnode_block_freed(dn, blkid))
goto no_issue;
/*
* This dnode hasn't been written to disk yet, so there's nothing to
* prefetch.
*/
nlevels = dn->dn_phys->dn_nlevels;
if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
goto no_issue;
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
goto no_issue;
dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
level, blkid, NULL);
if (db != NULL) {
mutex_exit(&db->db_mtx);
/*
* This dbuf already exists. It is either CACHED, or
* (we assume) about to be read or filled.
*/
goto no_issue;
}
/*
* Find the closest ancestor (indirect block) of the target block
* that is present in the cache. In this indirect block, we will
* find the bp that is at curlevel, curblkid.
*/
curlevel = level;
curblkid = blkid;
while (curlevel < nlevels - 1) {
int parent_level = curlevel + 1;
uint64_t parent_blkid = curblkid >> epbs;
dmu_buf_impl_t *db;
if (dbuf_hold_impl(dn, parent_level, parent_blkid,
FALSE, TRUE, FTAG, &db) == 0) {
blkptr_t *bpp = db->db_buf->b_data;
bp = bpp[P2PHASE(curblkid, 1 << epbs)];
dbuf_rele(db, FTAG);
break;
}
curlevel = parent_level;
curblkid = parent_blkid;
}
if (curlevel == nlevels - 1) {
/* No cached indirect blocks found. */
ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
bp = dn->dn_phys->dn_blkptr[curblkid];
}
ASSERT(!BP_IS_REDACTED(&bp) ||
dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
goto no_issue;
ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
ZIO_FLAG_CANFAIL);
dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, level, blkid);
dpa->dpa_curlevel = curlevel;
dpa->dpa_prio = prio;
dpa->dpa_aflags = aflags;
dpa->dpa_spa = dn->dn_objset->os_spa;
dpa->dpa_dnode = dn;
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
dpa->dpa_cb = cb;
dpa->dpa_arg = arg;
if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
else if (dnode_level_is_l2cacheable(&bp, dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
* a higher level, though, we want to issue the prefetches for all the
* indirect blocks asynchronously, so we can go on with whatever we were
* doing.
*/
if (curlevel == level) {
ASSERT3U(curblkid, ==, blkid);
dbuf_issue_final_prefetch(dpa, &bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dnode_level_is_l2cacheable(&bp, dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
&bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
/*
* We use pio here instead of dpa_zio since it's possible that
* dpa may have already been freed.
*/
zio_nowait(pio);
return (1);
no_issue:
if (cb != NULL)
cb(arg, level, blkid, B_FALSE);
return (0);
}
int
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t aflags)
{
return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
}
/*
* Helper function for dbuf_hold_impl() to copy a buffer. Handles
* the case of encrypted, compressed and uncompressed buffers by
* allocating the new buffer, respectively, with arc_alloc_raw_buf(),
* arc_alloc_compressed_buf() or arc_alloc_buf().
*
* NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
*/
noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
dbuf_dirty_record_t *dr = db->db_data_pending;
arc_buf_t *data = dr->dt.dl.dr_data;
enum zio_compress compress_type = arc_get_compression(data);
uint8_t complevel = arc_get_complevel(data);
if (arc_is_encrypted(data)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(data, &byteorder, salt, iv, mac);
dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
compress_type, complevel));
} else if (compress_type != ZIO_COMPRESS_OFF) {
dbuf_set_data(db, arc_alloc_compressed_buf(
dn->dn_objset->os_spa, db, arc_buf_size(data),
arc_buf_lsize(data), compress_type, complevel));
} else {
dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
rw_enter(&db->db_rwlock, RW_WRITER);
memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
/*
* Returns with db_holds incremented, and db_mtx not held.
* Note: dn_struct_rwlock must be held.
*/
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
const void *tag, dmu_buf_impl_t **dbp)
{
dmu_buf_impl_t *db, *parent = NULL;
uint64_t hv;
/* If the pool has been created, verify the tx_sync_lock is not held */
spa_t *spa = dn->dn_objset->os_spa;
dsl_pool_t *dp = spa->spa_dsl_pool;
if (dp != NULL) {
ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
}
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT3U(dn->dn_nlevels, >, level);
*dbp = NULL;
/* dbuf_find() returns with db_mtx held */
db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
if (db == NULL) {
blkptr_t *bp = NULL;
int err;
if (fail_uncached)
return (SET_ERROR(ENOENT));
ASSERT3P(parent, ==, NULL);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
err = SET_ERROR(ENOENT);
if (err) {
if (parent)
dbuf_rele(parent, NULL);
return (err);
}
}
if (err && err != ENOENT)
return (err);
db = dbuf_create(dn, level, blkid, parent, bp, hv);
}
if (fail_uncached && db->db_state != DB_CACHED) {
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
if (db->db_buf != NULL) {
arc_buf_access(db->db_buf);
ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
}
ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
/*
* If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
db->db_state == DB_CACHED && db->db_data_pending) {
dbuf_dirty_record_t *dr = db->db_data_pending;
if (dr->dt.dl.dr_data == db->db_buf) {
ASSERT3P(db->db_buf, !=, NULL);
dbuf_hold_copy(dn, db);
}
}
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
uint64_t size = db->db.db_size;
uint64_t usize = dmu_buf_user_size(&db->db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size, size, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size, usize,
db->db_user);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
size + usize);
}
db->db_caching_status = DB_NO_CACHE;
}
(void) zfs_refcount_add(&db->db_holds, tag);
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
/* NOTE: we can't rele the parent until after we drop the db_mtx */
if (parent)
dbuf_rele(parent, NULL);
ASSERT3P(DB_DNODE(db), ==, dn);
ASSERT3U(db->db_blkid, ==, blkid);
ASSERT3U(db->db_level, ==, level);
*dbp = db;
return (0);
}
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_bonus == NULL);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
}
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
if (db->db_blkid != DMU_SPILL_BLKID)
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
dbuf_new_size(db, blksz, tx);
return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
{
int64_t holds = zfs_refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
}
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
const void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
boolean_t result = B_FALSE;
if (blkid == DMU_BONUS_BLKID)
found_db = dbuf_find_bonus(os, obj);
else
found_db = dbuf_find(os, obj, 0, blkid, NULL);
if (found_db != NULL) {
if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
(void) zfs_refcount_add(&db->db_holds, tag);
result = B_TRUE;
}
mutex_exit(&found_db->db_mtx);
}
return (result);
}
/*
* If you call dbuf_rele() you had better not be referencing the dnode handle
* unless you have some other direct or indirect hold on the dnode. (An indirect
* hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
* Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
* dnode's parent dbuf evicting its dnode handles.
*/
void
dbuf_rele(dmu_buf_impl_t *db, const void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
}
void
dmu_buf_rele(dmu_buf_t *db, const void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically. The 'evicting'
* argument should be set if we are already in the dbuf-evicting code
* path, in which case we don't want to recursively evict. This allows us to
* avoid deeply nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
{
int64_t holds;
uint64_t size;
ASSERT(MUTEX_HELD(&db->db_mtx));
DBUF_VERIFY(db);
/*
* Remove the reference to the dbuf before removing its hold on the
* dnode so we can guarantee in dnode_move() that a referenced bonus
* buffer has a corresponding dnode hold.
*/
holds = zfs_refcount_remove(&db->db_holds, tag);
ASSERT(holds >= 0);
/*
* We can't freeze indirects if there is a possibility that they
* may be modified in the current syncing context.
*/
if (db->db_buf != NULL &&
holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
arc_buf_freeze(db->db_buf);
}
if (holds == db->db_dirtycnt &&
db->db_level == 0 && db->db_user_immediate_evict)
dbuf_evict_user(db);
if (holds == 0) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn;
boolean_t evict_dbuf = db->db_pending_evict;
/*
* If the dnode moves here, we cannot cross this
* barrier until the move completes.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
atomic_dec_32(&dn->dn_dbufs_count);
/*
* Decrementing the dbuf count means that the bonus
* buffer's dnode hold is no longer discounted in
* dnode_move(). The dnode cannot move until after
* the dnode_rele() below.
*/
DB_DNODE_EXIT(db);
/*
* Do not reference db after its lock is dropped.
* Another thread may evict it.
*/
mutex_exit(&db->db_mtx);
if (evict_dbuf)
dnode_evict_bonus(dn);
dnode_rele(dn, db);
} else if (db->db_buf == NULL) {
/*
* This is a special case: we never associated this
* dbuf with any data allocated from the ARC.
*/
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
dbuf_destroy(db);
} else if (arc_released(db->db_buf)) {
/*
* This dbuf has anonymous data associated with it.
*/
dbuf_destroy(db);
} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
db->db_pending_evict) {
dbuf_destroy(db);
} else if (!multilist_link_active(&db->db_cache_link)) {
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
dbuf_cached_state_t dcs =
dbuf_include_in_metadata_cache(db) ?
DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
db->db_caching_status = dcs;
multilist_insert(&dbuf_caches[dcs].cache, db);
uint64_t db_size = db->db.db_size;
uint64_t dbu_size = dmu_buf_user_size(&db->db);
(void) zfs_refcount_add_many(
&dbuf_caches[dcs].size, db_size, db);
size = zfs_refcount_add_many(
&dbuf_caches[dcs].size, dbu_size, db->db_user);
uint8_t db_level = db->db_level;
mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMP(metadata_cache_count);
DBUF_STAT_MAX(metadata_cache_size_bytes_max,
size);
} else {
DBUF_STAT_BUMP(cache_count);
DBUF_STAT_MAX(cache_size_bytes_max, size);
DBUF_STAT_BUMP(cache_levels[db_level]);
DBUF_STAT_INCR(cache_levels_bytes[db_level],
db_size + dbu_size);
}
if (dcs == DB_DBUF_CACHE && !evicting)
dbuf_evict_notify(size);
}
} else {
mutex_exit(&db->db_mtx);
}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
return (zfs_refcount_count(&db->db_holds));
}
uint64_t
dmu_buf_user_refcount(dmu_buf_t *db_fake)
{
uint64_t holds;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
mutex_exit(&db->db_mtx);
return (holds);
}
void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
dmu_buf_user_t *new_user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
dbuf_verify_user(db, DBVU_NOT_EVICTING);
if (db->db_user == old_user)
db->db_user = new_user;
else
old_user = db->db_user;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
mutex_exit(&db->db_mtx);
return (old_user);
}
void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, NULL, user));
}
void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_user_immediate_evict = TRUE;
return (dmu_buf_set_user(db_fake, user));
}
void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, user, NULL));
}
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
return (db->db_user);
}
uint64_t
dmu_buf_user_size(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
if (db->db_user == NULL)
return (0);
return (atomic_load_64(&db->db_user->dbu_size));
}
void
dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT3P(db->db_user, !=, NULL);
ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd);
atomic_add_64(&db->db_user->dbu_size, nadd);
}
void
dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT3P(db->db_user, !=, NULL);
ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub);
atomic_sub_64(&db->db_user->dbu_size, nsub);
}
void
dmu_buf_user_evict_wait(void)
{
taskq_wait(dbu_evict_taskq);
}
blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_blkptr);
}
objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_objset);
}
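/*
* Make sure db->db_blkptr is set, pointing either at a blkptr embedded in
* the dnode (top-level and spill blocks) or at the appropriate slot in the
* parent indirect block, holding the parent dbuf if it is not already set.
*/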
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
/* ASSERT(dmu_tx_is_syncing(tx)) */
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_blkptr != NULL)
return;
if (db->db_blkid == DMU_SPILL_BLKID) {
db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
BP_ZERO(db->db_blkptr);
return;
}
if (db->db_level == dn->dn_phys->dn_nlevels-1) {
/*
* This buffer was allocated at a time when there was
* no available blkptrs from the dnode, or it was
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
} else {
dmu_buf_impl_t *parent = db->db_parent;
int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT(dn->dn_phys->dn_nlevels > 1);
if (parent == NULL) {
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, db);
rw_exit(&dn->dn_struct_rwlock);
mutex_enter(&db->db_mtx);
db->db_parent = parent;
}
db->db_blkptr = (blkptr_t *)parent->db.db_data +
(db->db_blkid & ((1ULL << epbs) - 1));
DBUF_VERIFY(db);
}
}
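/*
* Copy a dirty bonus buffer into the on-disk dnode, then undirty the
* record and drop the hold that was taken when the dbuf was dirtied.
*/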
static void
dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
void *data = dr->dt.dl.dr_data;
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_blkid == DMU_BONUS_BLKID);
ASSERT(data != NULL);
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
dbuf_undirty_bonus(dr);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}
/*
* When syncing out a block of dnodes, adjust the block to deal with
* encryption. Normally, we make sure the block is decrypted before writing
* it. If we have crypt params, then we are writing a raw (encrypted) block
* from a raw receive. In this case, set the ARC buf's crypt params so
* that the BP will be filled with the correct byteorder, salt, iv, and mac.
*/
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
int err;
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
/*
* Unfortunately, there is currently no mechanism for
* syncing context to handle decryption errors. An error
* here is only possible if an attacker maliciously
* changed a dnode block and updated the associated
* checksums going up the block tree.
*/
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
err = arc_untransform(db->db_buf, db->db_objset->os_spa,
&zb, B_TRUE);
if (err)
panic("Invalid dnode block MAC");
} else if (dr->dt.dl.dr_has_raw_params) {
(void) arc_release(dr->dt.dl.dr_data, db);
arc_convert_to_raw(dr->dt.dl.dr_data,
dmu_objset_id(db->db_objset),
dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
}
}
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list(), so it
* is critical that we not allow the compiler to inline this function into
* dbuf_sync_list() and thereby drastically bloat the stack usage.
*/
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
ASSERT(db->db_level > 0);
DBUF_VERIFY(db);
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
ASSERT3U(db->db_state, ==, DB_CACHED);
ASSERT(db->db_buf != NULL);
/* Indirect block size must match what the dnode thinks it is. */
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
dbuf_check_blkptr(dn, db);
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, db->db_buf, tx);
zio_t *zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
/*
* Verify that the size of the data in our bonus buffer does not exceed
* its recorded size.
*
* The purpose of this verification is to catch any cases in development
* where the size of a phys structure (e.g., space_map_phys_t) grows and,
* due to incorrect feature management, older pools expect to read more
* data even though they didn't actually write it to begin with.
*
* For example, this would catch an error in the feature logic where we
* open an older pool and we expect to write the space map histogram of
* a space map with size SPACE_MAP_SIZE_V0.
*/
static void
dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
{
#ifdef ZFS_DEBUG
dnode_t *dn = dr->dr_dnode;
/*
* Encrypted bonus buffers can have data past their bonuslen.
* Skip the verification of these blocks.
*/
if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
return;
uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(bonuslen, <=, maxbonuslen);
arc_buf_t *datap = dr->dt.dl.dr_data;
char *datap_end = ((char *)datap) + bonuslen;
char *datap_max = ((char *)datap) + maxbonuslen;
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
ASSERT(*datap_end == 0);
#endif
}
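/*
* Return a pointer to the on-disk blkptr that a lightweight (dbuf-less)
* dirty record will overwrite: either one of the dnode's own blkptrs or a
* slot in its level-1 parent indirect block.
*/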
static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
ASSERT3P(dr->dr_dbuf, ==, NULL);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
} else {
dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
VERIFY3U(parent_db->db_level, ==, 1);
VERIFY3P(DB_DNODE(parent_db), ==, dn);
VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
blkptr_t *bp = parent_db->db.db_data;
return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
}
}
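/*
* zio ready callback for lightweight writes: update the dnode's space
* accounting and maxblkid, set the block's fill count, and copy the new
* BP into the parent (dnode or indirect block) under its rwlock.
*/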
static void
dbuf_lightweight_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
blkptr_t *bp = zio->io_bp;
if (zio->io_error != 0)
return;
dnode_t *dn = dr->dr_dnode;
blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
spa_t *spa = dmu_objset_spa(dn->dn_objset);
int64_t delta = bp_get_dsize_sync(spa, bp) -
bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta);
uint64_t blkid = dr->dt.dll.dr_blkid;
mutex_enter(&dn->dn_mtx);
if (blkid > dn->dn_phys->dn_maxblkid) {
ASSERT0(dn->dn_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = blkid;
}
mutex_exit(&dn->dn_mtx);
if (!BP_IS_EMBEDDED(bp)) {
uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
BP_SET_FILL(bp, fill);
}
dmu_buf_impl_t *parent_db;
EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
if (dr->dr_parent == NULL) {
parent_db = dn->dn_dbuf;
} else {
parent_db = dr->dr_parent->dr_dbuf;
}
rw_enter(&parent_db->db_rwlock, RW_WRITER);
*bp_orig = *bp;
rw_exit(&parent_db->db_rwlock);
}
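/*
* zio done callback for lightweight writes: update dataset block
* accounting (unless this was a rewrite or nopwrite), undirty the pool
* space, and free the dirty record.
*/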
static void
dbuf_lightweight_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
VERIFY0(zio->io_error);
objset_t *os = dr->dr_dnode->dn_objset;
dmu_tx_t *tx = os->os_synctx;
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, zio->io_bp, tx);
}
dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
zio->io_txg);
abd_free(dr->dt.dll.dr_abd);
kmem_free(dr, sizeof (*dr));
}
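/* Issue the write zio for a lightweight (dbuf-less) dirty record. */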
noinline static void
dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dnode_t *dn = dr->dr_dnode;
zio_t *pio;
if (dn->dn_phys->dn_nlevels == 1) {
pio = dn->dn_zio;
} else {
pio = dr->dr_parent->dr_zio;
}
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(dn->dn_objset),
.zb_object = dn->dn_object,
.zb_level = 0,
.zb_blkid = dr->dt.dll.dr_blkid,
};
/*
* See comment in dbuf_write(). This is so that zio->io_bp_orig
* will have the old BP in dbuf_lightweight_done().
*/
dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
&dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
zio_nowait(dr->dr_zio);
}
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list(), so it is
* critical that we not allow the compiler to inline this function into
* dbuf_sync_list() and thereby drastically bloat the stack usage.
*/
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
uint64_t txg = tx->tx_txg;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
/*
* To be synced, we must be dirtied. But we might have been freed
* after the dirty.
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
ASSERT3P(db->db.db_data, ==, NULL);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
} else if (db->db_state == DB_READ) {
/*
* This buffer was either cloned or had a Direct I/O write
* occur and has an in-flight read on the BP. It is safe to
* issue the write here, because the read has already been
* issued and the contents won't change.
*
* We can verify the case of both the clone and Direct I/O
* write by making sure the first dirty record for the dbuf
* has no ARC buffer associated with it.
*/
dbuf_dirty_record_t *dr_head =
list_head(&db->db_dirty_records);
ASSERT3P(db->db_buf, ==, NULL);
ASSERT3P(db->db.db_data, ==, NULL);
ASSERT3P(dr_head->dt.dl.dr_data, ==, NULL);
ASSERT3U(dr_head->dt.dl.dr_override_state, ==, DR_OVERRIDDEN);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
}
DBUF_VERIFY(db);
if (db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
/*
* In the previous transaction group, the bonus buffer
* was entirely used to store the attributes for the
* dnode which overrode the dn_spill field. However,
* when adding more attributes to the file a spill
* block was required to hold the extra attributes.
*
* Make sure to clear the garbage left in the dn_spill
* field from the previous attributes in the bonus
* buffer. Otherwise, after writing out the spill
* block to the new allocated dva, it will free
* the old block pointed to by the invalid dn_spill.
*/
db->db_blkptr = NULL;
}
dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/*
* If this is a bonus buffer, simply copy the bonus data into the
* dnode. It will be written out when the dnode is synced (and it
* will be synced, since it must have been dirty for dbuf_sync to
* be called).
*/
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dr->dr_dbuf == db);
dbuf_sync_bonus(dr, tx);
return;
}
os = dn->dn_objset;
/*
* This function may have dropped the db_mtx lock allowing a dmu_sync
* operation to sneak in. As a result, we need to ensure that we
* don't check the dr_override_state until we have returned from
* dbuf_check_blkptr.
*/
dbuf_check_blkptr(dn, db);
/*
* If this buffer is in the middle of an immediate write, wait for the
* synchronous IO to complete.
*
* This also holds for Direct I/O writes that set a dirty record's
* override state to DR_IN_DMU_SYNC, because all Direct I/O writes
* happen in open context.
*/
while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
cv_wait(&db->db_changed, &db->db_mtx);
}
/*
* If this is a dnode block, ensure it is appropriately encrypted
* or decrypted, depending on what we are writing to it this txg.
*/
if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
dbuf_prepare_encrypted_dnode_leaf(dr);
if (*datap != NULL && *datap == db->db_buf &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
zfs_refcount_count(&db->db_holds) > 1) {
/*
* If this buffer is currently "in use" (i.e., there
* are active holds and db_data still references it),
* then make a copy before we start the write so that
* any modifications from the open txg will not leak
* into this write.
*
* NOTE: this copy does not need to be made for
* objects only modified in the syncing context (e.g.
* DNODE blocks).
*/
int psize = arc_buf_size(*datap);
int lsize = arc_buf_lsize(*datap);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
enum zio_compress compress_type = arc_get_compression(*datap);
uint8_t complevel = arc_get_complevel(*datap);
if (arc_is_encrypted(*datap)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
*datap = arc_alloc_raw_buf(os->os_spa, db,
dmu_objset_id(os), byteorder, salt, iv, mac,
dn->dn_type, psize, lsize, compress_type,
complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type, complevel);
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
memcpy((*datap)->b_data, db->db.db_data, psize);
}
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
} else {
zio_nowait(dr->dr_zio);
}
}
/*
* Syncs out a range of dirty records for indirect or leaf dbufs. May be
* called recursively from dbuf_sync_indirect().
*/
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
* are processing the meta-dnode, and we have finished.
* The dbufs for all dnodes are put back on the list
* during processing, so that we can zio_wait()
* these IOs after initiating all child IOs.
*/
ASSERT3U(dr->dr_dbuf->db.db_object, ==,
DMU_META_DNODE_OBJECT);
break;
}
list_remove(list, dr);
if (dr->dr_dbuf == NULL) {
dbuf_sync_lightweight(dr, tx);
} else {
if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
VERIFY3U(dr->dr_dbuf->db_level, ==, level);
}
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
}
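/*
* arc_write ready callback for regular dbufs: adjust the dnode's space
* accounting for the new BP, compute the block's fill count, and copy the
* BP into the parent (dnode or indirect block) under the parent lock.
*/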
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
spa_t *spa = zio->io_spa;
int64_t delta;
uint64_t fill = 0;
int i;
ASSERT3P(db->db_blkptr, !=, NULL);
ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
zio->io_prev_space_delta = delta;
if (BP_GET_LOGICAL_BIRTH(bp) != 0) {
ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_type) ||
(db->db_blkid == DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_bonustype) ||
BP_IS_EMBEDDED(bp));
ASSERT(BP_GET_LEVEL(bp) == db->db_level);
}
mutex_enter(&db->db_mtx);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(bp)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
mutex_enter(&dn->dn_mtx);
if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
db->db_blkid != DMU_SPILL_BLKID) {
ASSERT0(db->db_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = db->db_blkid;
}
mutex_exit(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_DNODE) {
i = 0;
while (i < db->db.db_size) {
dnode_phys_t *dnp =
(void *)(((char *)db->db.db_data) + i);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE) {
fill++;
for (int j = 0; j < dnp->dn_nblkptr;
j++) {
(void) zfs_blkptr_verify(spa,
&dnp->dn_blkptr[j],
BLK_CONFIG_SKIP,
BLK_VERIFY_HALT);
}
if (dnp->dn_flags &
DNODE_FLAG_SPILL_BLKPTR) {
(void) zfs_blkptr_verify(spa,
DN_SPILL_BLKPTR(dnp),
BLK_CONFIG_SKIP,
BLK_VERIFY_HALT);
}
i += dnp->dn_extra_slots *
DNODE_MIN_SIZE;
}
}
} else {
if (BP_IS_HOLE(bp)) {
fill = 0;
} else {
fill = 1;
}
}
} else {
blkptr_t *ibp = db->db.db_data;
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
if (BP_IS_HOLE(ibp))
continue;
(void) zfs_blkptr_verify(spa, ibp,
BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
fill += BP_GET_FILL(ibp);
}
}
DB_DNODE_EXIT(db);
if (!BP_IS_EMBEDDED(bp))
BP_SET_FILL(bp, fill);
mutex_exit(&db->db_mtx);
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
*db->db_blkptr = *bp;
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block comprised of only
* holes, then we want this indirect to be compressed away to a hole. In
* order to do that we must zero out any information about the holes that
* this indirect points to before we try to compress it.
*/
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) zio, (void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp;
unsigned int epbs, i;
ASSERT3U(db->db_level, >, 0);
DB_DNODE_ENTER(db);
epbs = DB_DNODE(db)->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
DB_DNODE_EXIT(db);
ASSERT3U(epbs, <, 31);
/* Determine if all our children are holes */
for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
if (!BP_IS_HOLE(bp))
break;
}
/*
* If all the children are holes, then zero them all out so that
* we may get compressed away.
*/
if (i == 1ULL << epbs) {
/*
* We only found holes. Grab the rwlock to prevent
* anybody from reading the blocks we're about to
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
}
}
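/*
* arc_write done callback for regular dbufs: update dataset block
* accounting (unless this was a rewrite or nopwrite), detach and free the
* dirty record, and drop the hold taken when the dbuf was dirtied.
*/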
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
objset_t *os = db->db_objset;
dmu_tx_t *tx = os->os_synctx;
ASSERT0(zio->io_error);
ASSERT(db->db_blkptr == bp);
/*
* For nopwrites and rewrites we ensure that the bp matches our
* original and bypass all the accounting.
*/
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
dbuf_dirty_record_t *dr = db->db_data_pending;
dnode_t *dn = dr->dr_dnode;
ASSERT(!list_link_active(&dr->dr_dirty_node));
ASSERT(dr->dr_dbuf == db);
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
/* no dr_data if this is a NO_FILL or Direct I/O */
if (dr->dt.dl.dr_data != NULL &&
dr->dt.dl.dr_data != db->db_buf) {
ASSERT3B(dr->dt.dl.dr_brtwrite, ==, B_FALSE);
ASSERT3B(dr->dt.dl.dr_diowrite, ==, B_FALSE);
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
} else {
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_blkid, <=,
dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
}
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
cv_broadcast(&db->db_changed);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
zio->io_txg);
kmem_cache_free(dbuf_dirty_kmem_cache, dr);
}
static void
dbuf_write_nofill_ready(zio_t *zio)
{
dbuf_write_ready(zio, NULL, zio->io_private);
}
static void
dbuf_write_nofill_done(zio_t *zio)
{
dbuf_write_done(zio, NULL, zio->io_private);
}
static void
dbuf_write_override_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
dbuf_write_ready(zio, NULL, db);
}
static void
dbuf_write_override_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
mutex_enter(&db->db_mtx);
if (!BP_EQUAL(zio->io_bp, obp)) {
if (!BP_IS_HOLE(obp))
dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
arc_release(dr->dt.dl.dr_data, db);
}
mutex_exit(&db->db_mtx);
dbuf_write_done(zio, NULL, db);
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
typedef struct dbuf_remap_impl_callback_arg {
objset_t *drica_os;
uint64_t drica_blk_birth;
dmu_tx_t *drica_tx;
} dbuf_remap_impl_callback_arg_t;
static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg)
{
dbuf_remap_impl_callback_arg_t *drica = arg;
objset_t *os = drica->drica_os;
spa_t *spa = dmu_objset_spa(os);
dmu_tx_t *tx = drica->drica_tx;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (os == spa_meta_objset(spa)) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
size, drica->drica_blk_birth, tx);
}
}
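/*
* Remap a single BP to a concrete vdev via spa_remap_blkptr(). If the BP
* was remapped, reflect the change in any open livelist and store the new
* BP under the given rwlock (when one is supplied).
*/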
static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
blkptr_t bp_copy = *bp;
spa_t *spa = dmu_objset_spa(dn->dn_objset);
dbuf_remap_impl_callback_arg_t drica;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
drica.drica_os = dn->dn_objset;
drica.drica_blk_birth = BP_GET_LOGICAL_BIRTH(bp);
drica.drica_tx = tx;
if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
&drica)) {
/*
* If the blkptr being remapped is tracked by a livelist,
* then we need to make sure the livelist reflects the update.
* First, cancel out the old blkptr by appending a 'FREE'
* entry. Next, add an 'ALLOC' to track the new version. This
* way we avoid trying to free an inaccurate blkptr at delete.
* Note that embedded blkptrs are not tracked in livelists.
*/
if (dn->dn_objset != spa_meta_objset(spa)) {
dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
BP_GET_LOGICAL_BIRTH(bp) >
ds->ds_dir->dd_origin_txg) {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees,
bp);
bplist_append(&ds->ds_dir->dd_pending_allocs,
&bp_copy);
}
}
/*
* The db_rwlock prevents dbuf_read_impl() from
* dereferencing the BP while we are changing it. To
* avoid lock contention, only grab it when we are actually
* changing the BP.
*/
if (rw != NULL)
rw_enter(rw, RW_WRITER);
*bp = bp_copy;
if (rw != NULL)
rw_exit(rw);
}
}
/*
* Remap any existing BP's to concrete vdevs, if possible.
*/
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(db->db_objset);
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
return;
if (db->db_level > 0) {
blkptr_t *bp = db->db.db_data;
for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
}
} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
dnode_phys_t *dnp = db->db.db_data;
ASSERT3U(dn->dn_type, ==, DMU_OT_DNODE);
for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
i += dnp[i].dn_extra_slots + 1) {
for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
&dn->dn_dbuf->db_rwlock);
dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
tx);
}
}
}
}
/*
* Populate dr->dr_zio with a zio to commit a dirty buffer to disk.
* Caller is responsible for issuing the zio_[no]wait(dr->dr_zio).
*/
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
dmu_buf_impl_t *parent = db->db_parent;
uint64_t txg = tx->tx_txg;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *pio; /* parent I/O */
int wp_flag = 0;
ASSERT(dmu_tx_is_syncing(tx));
os = dn->dn_objset;
if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
/*
* Private object buffers are released here rather than in
* dbuf_dirty() since they are only modified in the syncing
* context and we don't want the overhead of making multiple
* copies of the data.
*/
if (BP_IS_HOLE(db->db_blkptr))
arc_buf_thaw(data);
else
dbuf_release_bp(db);
dbuf_remap(dn, db, tx);
}
if (parent != dn->dn_dbuf) {
/* Our parent is an indirect block. */
/* We have a dirty parent that has been scheduled for write. */
ASSERT(parent && parent->db_data_pending);
/* Our parent's buffer is one level closer to the dnode. */
ASSERT(db->db_level == parent->db_level-1);
/*
* We're about to modify our parent's db_data by modifying
* our block pointer, so the parent must be released.
*/
ASSERT(arc_released(parent->db_buf));
pio = parent->db_data_pending->dr_zio;
} else {
/* Our parent is the dnode itself. */
ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
db->db_blkid != DMU_SPILL_BLKID) ||
(db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
pio = dn->dn_zio;
}
ASSERT(db->db_level == 0 || data == db->db_buf);
ASSERT3U(BP_GET_LOGICAL_BIRTH(db->db_blkptr), <=, txg);
ASSERT(pio);
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
db->db.db_object, db->db_level, db->db_blkid);
if (db->db_blkid == DMU_SPILL_BLKID)
wp_flag = WP_SPILL;
wp_flag |= (data == NULL) ? WP_NOFILL : 0;
dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
/*
* We copy the blkptr now (rather than when we instantiate the dirty
* record), because its value can change between open context and
* syncing context. We do not need to hold dn_struct_rwlock to read
* db_blkptr because we are in syncing context.
*/
dr->dr_bp_copy = *db->db_blkptr;
if (db->db_level == 0 &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* The BP for this block has been provided by open context
* (by dmu_sync(), dmu_write_direct(),
* or dmu_buf_write_embedded()).
*/
abd_t *contents = (data != NULL) ?
abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
contents, db->db.db_size, db->db.db_size, &zp,
dbuf_write_override_ready, NULL,
dbuf_write_override_done,
dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
mutex_enter(&db->db_mtx);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
dr->dt.dl.dr_brtwrite);
mutex_exit(&db->db_mtx);
} else if (data == NULL) {
ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
dr->dr_zio = zio_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
dbuf_write_nofill_ready, NULL,
dbuf_write_nofill_done, db,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
} else {
ASSERT(arc_released(data));
/*
* For indirect blocks, we want to setup the children
* ready callback so that we can properly handle an indirect
* block that only contains holes.
*/
arc_write_done_func_t *children_ready_cb = NULL;
if (db->db_level != 0)
children_ready_cb = dbuf_write_children_ready;
dr->dr_zio = arc_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
dbuf_is_l2cacheable(db, NULL), &zp, dbuf_write_ready,
children_ready_cb, dbuf_write_done, db,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
}
}
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_clone_or_dio);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
"Maximum size in bytes of the dbuf cache.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
"Maximum size in bytes of dbuf metadata cache.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
"Set size of dbuf cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
"Set size of dbuf metadata cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
"Set size of dbuf cache mutex array as log2 shift.");
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index ecc6761f8fa4..ce2c79dbfaa3 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -1,2765 +1,2765 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_allocate", KSTAT_DATA_UINT64 },
{ "dnode_reallocate", KSTAT_DATA_UINT64 },
{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
{ "dnode_move_special", KSTAT_DATA_UINT64 },
{ "dnode_move_handle", KSTAT_DATA_UINT64 },
{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
{ "dnode_move_active", KSTAT_DATA_UINT64 },
};
dnode_sums_t dnode_sums;
static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;
static dnode_phys_t dnode_phys_zero __maybe_unused;
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
#ifdef _KERNEL
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif /* _KERNEL */
static int
dbuf_compare(const void *x1, const void *x2)
{
const dmu_buf_impl_t *d1 = x1;
const dmu_buf_impl_t *d2 = x2;
int cmp = TREE_CMP(d1->db_level, d2->db_level);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
if (likely(cmp))
return (cmp);
if (d1->db_state == DB_MARKER) {
ASSERT3S(d2->db_state, !=, DB_MARKER);
return (TREE_PCMP(d1->db_parent, d2));
} else if (d2->db_state == DB_MARKER) {
ASSERT3S(d1->db_state, !=, DB_MARKER);
return (TREE_PCMP(d1, d2->db_parent));
}
if (d1->db_state == DB_SEARCH) {
ASSERT3S(d2->db_state, !=, DB_SEARCH);
return (-1);
} else if (d2->db_state == DB_SEARCH) {
ASSERT3S(d1->db_state, !=, DB_SEARCH);
return (1);
}
return (TREE_PCMP(d1, d2));
}
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dnode_t *dn = arg;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
/*
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
memset(dn->dn_next_type, 0, sizeof (dn->dn_next_type));
memset(dn->dn_next_nblkptr, 0, sizeof (dn->dn_next_nblkptr));
memset(dn->dn_next_nlevels, 0, sizeof (dn->dn_next_nlevels));
memset(dn->dn_next_indblkshift, 0, sizeof (dn->dn_next_indblkshift));
memset(dn->dn_next_bonustype, 0, sizeof (dn->dn_next_bonustype));
memset(dn->dn_rm_spillblk, 0, sizeof (dn->dn_rm_spillblk));
memset(dn->dn_next_bonuslen, 0, sizeof (dn->dn_next_bonuslen));
memset(dn->dn_next_blksz, 0, sizeof (dn->dn_next_blksz));
memset(dn->dn_next_maxblkid, 0, sizeof (dn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
dn->dn_moved = 0;
return (0);
}
static void
dnode_dest(void *arg, void *unused)
{
(void) unused;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
cv_destroy(&dn->dn_nodnholds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
}
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
ASSERT3P(dn->dn_bonus, ==, NULL);
ASSERT(!dn->dn_have_spill);
ASSERT3P(dn->dn_zio, ==, NULL);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
avl_destroy(&dn->dn_dbufs);
}
static int
dnode_kstats_update(kstat_t *ksp, int rw)
{
dnode_stats_t *ds = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
ds->dnode_hold_dbuf_hold.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_dbuf_hold);
ds->dnode_hold_dbuf_read.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_dbuf_read);
ds->dnode_hold_alloc_hits.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_hits);
ds->dnode_hold_alloc_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_misses);
ds->dnode_hold_alloc_interior.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_interior);
ds->dnode_hold_alloc_lock_retry.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_lock_retry);
ds->dnode_hold_alloc_lock_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_lock_misses);
ds->dnode_hold_alloc_type_none.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_alloc_type_none);
ds->dnode_hold_free_hits.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_hits);
ds->dnode_hold_free_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_misses);
ds->dnode_hold_free_lock_misses.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_lock_misses);
ds->dnode_hold_free_lock_retry.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_lock_retry);
ds->dnode_hold_free_refcount.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_refcount);
ds->dnode_hold_free_overflow.value.ui64 =
wmsum_value(&dnode_sums.dnode_hold_free_overflow);
ds->dnode_free_interior_lock_retry.value.ui64 =
wmsum_value(&dnode_sums.dnode_free_interior_lock_retry);
ds->dnode_allocate.value.ui64 =
wmsum_value(&dnode_sums.dnode_allocate);
ds->dnode_reallocate.value.ui64 =
wmsum_value(&dnode_sums.dnode_reallocate);
ds->dnode_buf_evict.value.ui64 =
wmsum_value(&dnode_sums.dnode_buf_evict);
ds->dnode_alloc_next_chunk.value.ui64 =
wmsum_value(&dnode_sums.dnode_alloc_next_chunk);
ds->dnode_alloc_race.value.ui64 =
wmsum_value(&dnode_sums.dnode_alloc_race);
ds->dnode_alloc_next_block.value.ui64 =
wmsum_value(&dnode_sums.dnode_alloc_next_block);
ds->dnode_move_invalid.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_invalid);
ds->dnode_move_recheck1.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_recheck1);
ds->dnode_move_recheck2.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_recheck2);
ds->dnode_move_special.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_special);
ds->dnode_move_handle.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_handle);
ds->dnode_move_rwlock.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_rwlock);
ds->dnode_move_active.value.ui64 =
wmsum_value(&dnode_sums.dnode_move_active);
return (0);
}
void
dnode_init(void)
{
ASSERT(dnode_cache == NULL);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, KMC_RECLAIMABLE);
kmem_cache_set_move(dnode_cache, dnode_move);
wmsum_init(&dnode_sums.dnode_hold_dbuf_hold, 0);
wmsum_init(&dnode_sums.dnode_hold_dbuf_read, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_hits, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_interior, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_lock_retry, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_lock_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_alloc_type_none, 0);
wmsum_init(&dnode_sums.dnode_hold_free_hits, 0);
wmsum_init(&dnode_sums.dnode_hold_free_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_free_lock_misses, 0);
wmsum_init(&dnode_sums.dnode_hold_free_lock_retry, 0);
wmsum_init(&dnode_sums.dnode_hold_free_refcount, 0);
wmsum_init(&dnode_sums.dnode_hold_free_overflow, 0);
wmsum_init(&dnode_sums.dnode_free_interior_lock_retry, 0);
wmsum_init(&dnode_sums.dnode_allocate, 0);
wmsum_init(&dnode_sums.dnode_reallocate, 0);
wmsum_init(&dnode_sums.dnode_buf_evict, 0);
wmsum_init(&dnode_sums.dnode_alloc_next_chunk, 0);
wmsum_init(&dnode_sums.dnode_alloc_race, 0);
wmsum_init(&dnode_sums.dnode_alloc_next_block, 0);
wmsum_init(&dnode_sums.dnode_move_invalid, 0);
wmsum_init(&dnode_sums.dnode_move_recheck1, 0);
wmsum_init(&dnode_sums.dnode_move_recheck2, 0);
wmsum_init(&dnode_sums.dnode_move_special, 0);
wmsum_init(&dnode_sums.dnode_move_handle, 0);
wmsum_init(&dnode_sums.dnode_move_rwlock, 0);
wmsum_init(&dnode_sums.dnode_move_active, 0);
dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dnode_ksp != NULL) {
dnode_ksp->ks_data = &dnode_stats;
dnode_ksp->ks_update = dnode_kstats_update;
kstat_install(dnode_ksp);
}
}
void
dnode_fini(void)
{
if (dnode_ksp != NULL) {
kstat_delete(dnode_ksp);
dnode_ksp = NULL;
}
wmsum_fini(&dnode_sums.dnode_hold_dbuf_hold);
wmsum_fini(&dnode_sums.dnode_hold_dbuf_read);
wmsum_fini(&dnode_sums.dnode_hold_alloc_hits);
wmsum_fini(&dnode_sums.dnode_hold_alloc_misses);
wmsum_fini(&dnode_sums.dnode_hold_alloc_interior);
wmsum_fini(&dnode_sums.dnode_hold_alloc_lock_retry);
wmsum_fini(&dnode_sums.dnode_hold_alloc_lock_misses);
wmsum_fini(&dnode_sums.dnode_hold_alloc_type_none);
wmsum_fini(&dnode_sums.dnode_hold_free_hits);
wmsum_fini(&dnode_sums.dnode_hold_free_misses);
wmsum_fini(&dnode_sums.dnode_hold_free_lock_misses);
wmsum_fini(&dnode_sums.dnode_hold_free_lock_retry);
wmsum_fini(&dnode_sums.dnode_hold_free_refcount);
wmsum_fini(&dnode_sums.dnode_hold_free_overflow);
wmsum_fini(&dnode_sums.dnode_free_interior_lock_retry);
wmsum_fini(&dnode_sums.dnode_allocate);
wmsum_fini(&dnode_sums.dnode_reallocate);
wmsum_fini(&dnode_sums.dnode_buf_evict);
wmsum_fini(&dnode_sums.dnode_alloc_next_chunk);
wmsum_fini(&dnode_sums.dnode_alloc_race);
wmsum_fini(&dnode_sums.dnode_alloc_next_block);
wmsum_fini(&dnode_sums.dnode_move_invalid);
wmsum_fini(&dnode_sums.dnode_move_recheck1);
wmsum_fini(&dnode_sums.dnode_move_recheck2);
wmsum_fini(&dnode_sums.dnode_move_special);
wmsum_fini(&dnode_sums.dnode_move_handle);
wmsum_fini(&dnode_sums.dnode_move_rwlock);
wmsum_fini(&dnode_sums.dnode_move_active);
kmem_cache_destroy(dnode_cache);
dnode_cache = NULL;
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
int drop_struct_lock = FALSE;
ASSERT(dn->dn_phys);
ASSERT(dn->dn_objset);
ASSERT(dn->dn_handle->dnh_dnode == dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
return;
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
}
ASSERT3U(dn->dn_nlevels, <=, 30);
ASSERT(DMU_OT_IS_VALID(dn->dn_type));
ASSERT3U(dn->dn_nblkptr, >=, 1);
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
ASSERT3U(dn->dn_datablksz, ==,
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
dn->dn_bonuslen, <=, max_bonuslen);
for (i = 0; i < TXG_SIZE; i++) {
ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
}
}
if (dn->dn_phys->dn_type != DMU_OT_NONE)
ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
if (dn->dn_dbuf != NULL) {
ASSERT3P(dn->dn_phys, ==,
(dnode_phys_t *)dn->dn_dbuf->db.db_data +
(dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
uint64_t *buf64 = (void*)&dnp->dn_blkptr;
int i;
if (dnp->dn_type == DMU_OT_NONE) {
memset(dnp, 0, sizeof (dnode_phys_t));
return;
}
dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
dnp->dn_used = BSWAP_64(dnp->dn_used);
/*
* dn_nblkptr is only one byte, so it's OK to read it in either
* byte order. We can't read dn_bonuslen.
*/
ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
buf64[i] = BSWAP_64(buf64[i]);
/*
* OK to check dn_bonuslen for zero, because it won't matter if
* we have the wrong byte order. This is necessary because the
* dnode dnode is smaller than a regular dnode.
*/
if (dnp->dn_bonuslen != 0) {
dmu_object_byteswap_t byteswap;
ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
dmu_ot_byteswap[byteswap].ob_func(DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
/* Swap SPILL block if we have one */
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
}
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
dnode_byteswap(dnp);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE)
i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
}
}
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t));
if (newsize < dn->dn_bonuslen) {
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
memset(data_end, 0, diff);
}
dn->dn_bonuslen = newsize;
if (newsize == 0)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
else
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_set_storage_type(dnode_t *dn, dmu_object_type_t newtype)
{
/*
* This is not in the dnode_phys, but it should be, and perhaps one day
* will. For now we require it be set after taking a hold.
*/
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dn->dn_storage_type = newtype;
}
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_have_spill = B_FALSE;
}
static void
dnode_setdblksz(dnode_t *dn, int size)
{
ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
dn->dn_datablksz = size;
dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
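/*
 * Editor's worked example for dnode_setdblksz() above (illustrative only):
 * a 128K (131072-byte) data block stores dn_datablkszsec =
 * 131072 >> SPA_MINBLOCKSHIFT = 256 sectors and, because the size is a
 * power of two, dn_datablkshift = highbit64(131071) = 17, so
 * 1 << dn_datablkshift == dn_datablksz.  A non-power-of-two size leaves
 * dn_datablkshift at 0.
 */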
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn;
dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
dn->dn_moved = 0;
/*
* Defer setting dn_objset until the dnode is ready to be a candidate
* for the dnode_move() callback.
*/
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_handle = dnh;
dn->dn_phys = dnp;
if (dnp->dn_datablkszsec) {
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
} else {
dn->dn_datablksz = 0;
dn->dn_datablkszsec = 0;
dn->dn_datablkshift = 0;
}
dn->dn_indblkshift = dnp->dn_indblkshift;
dn->dn_nlevels = dnp->dn_nlevels;
dn->dn_type = dnp->dn_type;
dn->dn_nblkptr = dnp->dn_nblkptr;
dn->dn_checksum = dnp->dn_checksum;
dn->dn_compress = dnp->dn_compress;
dn->dn_bonustype = dnp->dn_bonustype;
dn->dn_bonuslen = dnp->dn_bonuslen;
dn->dn_num_slots = dnp->dn_extra_slots + 1;
dn->dn_maxblkid = dnp->dn_maxblkid;
dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
dn->dn_id_flags = 0;
dn->dn_storage_type = DMU_OT_NONE;
dmu_zfetch_init(&dn->dn_zfetch, dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
mutex_enter(&os->os_lock);
/*
* Exclude special dnodes from os_dnodes so an empty os_dnodes
* signifies that the special dnodes have no references from
* their children (the entries in os_dnodes). This allows
* dnode_destroy() to easily determine if the last child has
* been removed and then complete eviction of the objset.
*/
if (!DMU_OBJECT_IS_SPECIAL(object))
list_insert_head(&os->os_dnodes, dn);
membar_producer();
/*
* Everything else must be valid before assigning dn_objset
* makes the dnode eligible for dnode_move().
*/
dn->dn_objset = os;
dnh->dnh_dnode = dn;
mutex_exit(&os->os_lock);
arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
return (dn);
}
/*
* Caller must be holding the dnode handle, which is released upon return.
*/
static void
dnode_destroy(dnode_t *dn)
{
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
list_remove(&os->os_dnodes, dn);
complete_os_eviction =
list_is_empty(&os->os_dnodes) &&
list_link_active(&os->os_evicting_node);
}
mutex_exit(&os->os_lock);
/* the dnode can no longer move, so we can release the handle */
if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
zrl_remove(&dn->dn_handle->dnh_zrlock);
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
}
dn->dn_zio = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_storage_type = DMU_OT_NONE;
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
if (complete_os_eviction)
dmu_objset_evict_done(os);
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int i;
ASSERT3U(dn_slots, >, 0);
ASSERT3U(dn_slots << DNODE_SHIFT, <=,
spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (blocksize == 0)
blocksize = 1 << zfs_default_bs;
else
blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
if (ibs == 0)
ibs = zfs_default_ibs;
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
dn->dn_objset, (u_longlong_t)dn->dn_object,
(u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(memcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0) ||
(bonustype == DMU_OTN_UINT64_METADATA && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(dn->dn_maxblkid);
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
}
dn->dn_type = ot;
dnode_setdblksz(dn, blocksize);
dn->dn_indblkshift = ibs;
dn->dn_nlevels = 1;
dn->dn_num_slots = dn_slots;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
dn->dn_nblkptr = 1;
else {
dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
}
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_dirty_txg = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
dnode_setdirty(dn, tx);
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx)
{
int nblkptr;
ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
ASSERT0(blocksize % SPA_MINBLOCKSIZE);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
ASSERT(tx->tx_txg != 0);
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
dnode_free_interior_slots(dn);
DNODE_STAT_BUMP(dnode_reallocate);
/* clean up any unreferenced dbufs */
dnode_evict_dbufs(dn);
dn->dn_id_flags = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
/* change blocksize */
ASSERT0(dn->dn_maxblkid);
ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
dnode_block_freed(dn, 0));
dnode_setdblksz(dn, blocksize);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
nblkptr = 1;
else
nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
if (dn->dn_bonustype != bonustype)
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
if (dn->dn_nblkptr != nblkptr)
dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
dbuf_rm_spill(dn, tx);
dnode_rm_spill(dn, tx);
}
rw_exit(&dn->dn_struct_rwlock);
/* change type */
dn->dn_type = ot;
/* change bonus size and type */
mutex_enter(&dn->dn_mtx);
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_num_slots = dn_slots;
dn->dn_nblkptr = nblkptr;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
/* fix up the bonus db_size */
if (dn->dn_bonus) {
dn->dn_bonus->db.db_size =
DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
}
dn->dn_allocated_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
}
#ifdef _KERNEL
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
/* Copy fields. */
ndn->dn_objset = odn->dn_objset;
ndn->dn_object = odn->dn_object;
ndn->dn_dbuf = odn->dn_dbuf;
ndn->dn_handle = odn->dn_handle;
ndn->dn_phys = odn->dn_phys;
ndn->dn_type = odn->dn_type;
ndn->dn_bonuslen = odn->dn_bonuslen;
ndn->dn_bonustype = odn->dn_bonustype;
ndn->dn_nblkptr = odn->dn_nblkptr;
ndn->dn_checksum = odn->dn_checksum;
ndn->dn_compress = odn->dn_compress;
ndn->dn_nlevels = odn->dn_nlevels;
ndn->dn_indblkshift = odn->dn_indblkshift;
ndn->dn_datablkshift = odn->dn_datablkshift;
ndn->dn_datablkszsec = odn->dn_datablkszsec;
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
memcpy(ndn->dn_next_type, odn->dn_next_type,
sizeof (odn->dn_next_type));
memcpy(ndn->dn_next_nblkptr, odn->dn_next_nblkptr,
sizeof (odn->dn_next_nblkptr));
memcpy(ndn->dn_next_nlevels, odn->dn_next_nlevels,
sizeof (odn->dn_next_nlevels));
memcpy(ndn->dn_next_indblkshift, odn->dn_next_indblkshift,
sizeof (odn->dn_next_indblkshift));
memcpy(ndn->dn_next_bonustype, odn->dn_next_bonustype,
sizeof (odn->dn_next_bonustype));
memcpy(ndn->dn_rm_spillblk, odn->dn_rm_spillblk,
sizeof (odn->dn_rm_spillblk));
memcpy(ndn->dn_next_bonuslen, odn->dn_next_bonuslen,
sizeof (odn->dn_next_bonuslen));
memcpy(ndn->dn_next_blksz, odn->dn_next_blksz,
sizeof (odn->dn_next_blksz));
memcpy(ndn->dn_next_maxblkid, odn->dn_next_maxblkid,
sizeof (odn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
memcpy(ndn->dn_free_ranges, odn->dn_free_ranges,
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
ndn->dn_oldused = odn->dn_oldused;
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
ndn->dn_storage_type = odn->dn_storage_type;
dmu_zfetch_init(&ndn->dn_zfetch, ndn);
/*
* Update back pointers. Updating the handle fixes the back pointer of
* every descendant dbuf as well as the bonus dbuf.
*/
ASSERT(ndn->dn_handle->dnh_dnode == odn);
ndn->dn_handle->dnh_dnode = ndn;
/*
* Invalidate the original dnode by clearing all of its back pointers.
*/
odn->dn_dbuf = NULL;
odn->dn_handle = NULL;
avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
odn->dn_bonus = NULL;
dmu_zfetch_fini(&odn->dn_zfetch);
/*
* Set the low bit of the objset pointer to ensure that dnode_move()
* recognizes the dnode as invalid in any subsequent callback.
*/
POINTER_INVALIDATE(&odn->dn_objset);
/*
* Satisfy the destructor.
*/
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
odn->dn_free_ranges[i] = NULL;
odn->dn_next_nlevels[i] = 0;
odn->dn_next_indblkshift[i] = 0;
odn->dn_next_bonustype[i] = 0;
odn->dn_rm_spillblk[i] = 0;
odn->dn_next_bonuslen[i] = 0;
odn->dn_next_blksz[i] = 0;
}
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
odn->dn_dirty_txg = 0;
odn->dn_dirtyctx = 0;
odn->dn_dirtyctx_firstset = NULL;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
odn->dn_storage_type = DMU_OT_NONE;
/*
* Mark the dnode.
*/
ndn->dn_moved = 1;
odn->dn_moved = (uint8_t)-1;
}
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
dnode_t *odn = buf, *ndn = newbuf;
objset_t *os;
int64_t refcount;
uint32_t dbufs;
#ifndef USE_DNODE_HANDLE
/*
* We can't move dnodes if dbufs reference them directly without
* using handles and respective locking. Unless USE_DNODE_HANDLE
* is defined, the code below exists only to make sure it still builds;
* it should never be used, since it is unsafe.
*/
#ifdef ZFS_DEBUG
PANIC("dnode_move() called without USE_DNODE_HANDLE");
#endif
return (KMEM_CBRC_NO);
#endif
/*
* The dnode is on the objset's list of known dnodes if the objset
* pointer is valid. We set the low bit of the objset pointer when
* freeing the dnode to invalidate it, and the memory patterns written
* by kmem (baddcafe and deadbeef) set at least one of the two low bits.
* A newly created dnode sets the objset pointer last of all to indicate
* that the dnode is known and in a valid state to be moved by this
* function.
*/
os = odn->dn_objset;
if (!POINTER_IS_VALID(os)) {
DNODE_STAT_BUMP(dnode_move_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* Ensure that the objset does not go away during the move.
*/
rw_enter(&os_lock, RW_WRITER);
if (os != odn->dn_objset) {
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* If the dnode is still valid, then so is the objset. We know that no
* valid objset can be freed while we hold os_lock, so we can safely
* ensure that the objset remains in use.
*/
mutex_enter(&os->os_lock);
/*
* Recheck the objset pointer in case the dnode was removed just before
* acquiring the lock.
*/
if (os != odn->dn_objset) {
mutex_exit(&os->os_lock);
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* At this point we know that as long as we hold os->os_lock, the dnode
* cannot be freed and fields within the dnode can be safely accessed.
* The objset listing this dnode cannot go away as long as this dnode is
* on its list.
*/
rw_exit(&os_lock);
if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_special);
return (KMEM_CBRC_NO);
}
ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
/*
* Lock the dnode handle to prevent the dnode from obtaining any new
* holds. This also prevents the descendant dbufs and the bonus dbuf
* from accessing the dnode, so that we can discount their holds. The
* handle is safe to access because we know that while the dnode cannot
* go away, neither can its handle. Once we hold dnh_zrlock, we can
* safely move any dnode referenced only by dbufs.
*/
if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_handle);
return (KMEM_CBRC_LATER);
}
/*
* Ensure a consistent view of the dnode's holds and the dnode's dbufs.
* We need to guarantee that there is a hold for every dbuf in order to
* determine whether the dnode is actively referenced. Falsely matching
* a dbuf to an active hold would lead to an unsafe move. It's possible
* that a thread already having an active dnode hold is about to add a
* dbuf, and we can't compare hold and dbuf counts while the add is in
* progress.
*/
if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_rwlock);
return (KMEM_CBRC_LATER);
}
/*
* A dbuf may be removed (evicted) without an active dnode hold. In that
* case, the dbuf count is decremented under the handle lock before the
* dbuf's hold is released. This order ensures that if we count the hold
* after the dbuf is removed but before its hold is released, we will
* treat the unmatched hold as active and exit safely. If we count the
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = DN_DBUFS_COUNT(odn);
/* We can't have more dbufs than dnode holds. */
ASSERT3U(dbufs, <=, refcount);
DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
uint32_t, dbufs);
if (refcount > dbufs) {
rw_exit(&odn->dn_struct_rwlock);
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_active);
return (KMEM_CBRC_LATER);
}
rw_exit(&odn->dn_struct_rwlock);
/*
* At this point we know that anyone with a hold on the dnode is not
* actively referencing it. The dnode is known and in a valid state to
* move. We're holding the locks needed to execute the critical section.
*/
dnode_move_impl(odn, ndn);
list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
return (KMEM_CBRC_YES);
}
#endif /* _KERNEL */
static void
dnode_slots_hold(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
zrl_add(&dnh->dnh_zrlock);
}
}
static void
dnode_slots_rele(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (zrl_is_locked(&dnh->dnh_zrlock))
zrl_exit(&dnh->dnh_zrlock);
else
zrl_remove(&dnh->dnh_zrlock);
}
}
static int
dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (!zrl_tryenter(&dnh->dnh_zrlock)) {
for (int j = idx; j < i; j++) {
dnh = &children->dnc_children[j];
zrl_exit(&dnh->dnh_zrlock);
}
return (0);
}
}
return (1);
}
static void
dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnh->dnh_dnode = ptr;
}
}
static boolean_t
dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
/*
* If all dnode slots are either already free or
* evictable, return B_TRUE.
*/
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnode_t *dn = dnh->dnh_dnode;
if (dn == DN_SLOT_FREE) {
continue;
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
zfs_refcount_is_zero(&dn->dn_holds) &&
!DNODE_IS_DIRTY(dn));
mutex_exit(&dn->dn_mtx);
if (!can_free)
return (B_FALSE);
else
continue;
} else {
return (B_FALSE);
}
}
return (B_TRUE);
}
static uint_t
dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
{
uint_t reclaimed = 0;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
dnode_destroy(dnh->dnh_dnode);
dnh->dnh_dnode = DN_SLOT_FREE;
reclaimed++;
}
}
return (reclaimed);
}
void
dnode_free_interior_slots(dnode_t *dn)
{
dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
int idx = (dn->dn_object & (epb - 1)) + 1;
int slots = dn->dn_num_slots - 1;
if (slots == 0)
return;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
while (!dnode_slots_tryenter(children, idx, slots)) {
DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
kpreempt(KPREEMPT_SYNC);
}
dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
dnode_slots_rele(children, idx, slots);
}
void
dnode_special_close(dnode_handle_t *dnh)
{
dnode_t *dn = dnh->dnh_dnode;
/*
* Ensure dnode_rele_and_unlock() has released dn_mtx, after final
* zfs_refcount_remove()
*/
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_count(&dn->dn_holds) > 0)
cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
zrl_add(&dnh->dnh_zrlock);
dnode_destroy(dn); /* implicit zrl_remove() */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = NULL;
}
void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
dnode_handle_t *dnh)
{
dnode_t *dn;
zrl_init(&dnh->dnh_zrlock);
VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
dn = dnode_create(os, dnp, NULL, object, dnh);
DNODE_VERIFY(dn);
zrl_exit(&dnh->dnh_zrlock);
}
static void
dnode_buf_evict_async(void *dbu)
{
dnode_children_t *dnc = dbu;
DNODE_STAT_BUMP(dnode_buf_evict);
for (int i = 0; i < dnc->dnc_count; i++) {
dnode_handle_t *dnh = &dnc->dnc_children[i];
dnode_t *dn;
/*
* The dnode handle lock guards against the dnode moving to
* another valid address, so there is no need here to guard
* against changes to or from NULL.
*/
if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
continue;
}
zrl_add(&dnh->dnh_zrlock);
dn = dnh->dnh_dnode;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
}
kmem_free(dnc, sizeof (dnode_children_t) +
dnc->dnc_count * sizeof (dnode_handle_t));
}
/*
* When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
* to ensure the hole at the specified object offset is large enough to
* hold the dnode being created. The slots parameter is also used to ensure
* a dnode does not span multiple dnode blocks. In both of these cases, if
* a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
* are only possible when using DNODE_MUST_BE_FREE.
*
* If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
* dnode_hold_impl() will check if the requested dnode is already consumed
* as an extra dnode slot by a large dnode, in which case it returns
* ENOENT.
*
* If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
* return whether the hold would succeed or not. tag and dnp should be set to
* NULL in this case.
*
* errors:
* EINVAL - Invalid object number or flags.
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
* EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
* - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
* - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
* ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
* - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
* EIO - I/O error when reading the meta dnode dbuf.
*
* Unlike dnode_hold(), dnode_hold_impl() can succeed even for free dnodes
* (when DNODE_MUST_BE_FREE is set).
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
const void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
int type;
uint64_t blk;
dnode_t *mdn, *dn;
dmu_buf_impl_t *db;
dnode_children_t *dnc;
dnode_phys_t *dn_block;
dnode_handle_t *dnh;
ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
/*
* If you are holding the spa config lock as writer, you shouldn't
* be asking the DMU to do *anything* unless it's the root pool
* which may require us to read from the root filesystem while
* holding some (not all) of the locks as writer.
*/
ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
object == DMU_PROJECTUSED_OBJECT) {
if (object == DMU_USERUSED_OBJECT)
dn = DMU_USERUSED_DNODE(os);
else if (object == DMU_GROUPUSED_OBJECT)
dn = DMU_GROUPUSED_DNODE(os);
else
dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
/* Don't actually hold if dry run, just return 0 */
if (!(flag & DNODE_DRY_RUN)) {
(void) zfs_refcount_add(&dn->dn_holds, tag);
*dnp = dn;
}
return (0);
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
DNODE_VERIFY(mdn);
if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
rw_enter(&mdn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
db = dbuf_hold(mdn, blk, FTAG);
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL) {
DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
return (SET_ERROR(EIO));
}
/*
* We do not need to decrypt to read the dnode so it doesn't matter
* if we get the encrypted or decrypted version.
*/
err = dbuf_read(db, NULL, DB_RF_CANFAIL |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (err) {
DNODE_STAT_BUMP(dnode_hold_dbuf_read);
dbuf_rele(db, FTAG);
return (err);
}
ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
epb = db->db.db_size >> DNODE_SHIFT;
idx = object & (epb - 1);
dn_block = (dnode_phys_t *)db->db.db_data;
ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
dnc = dmu_buf_get_user(&db->db);
dnh = NULL;
if (dnc == NULL) {
dnode_children_t *winner;
int skip = 0;
dnc = kmem_zalloc(sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t), KM_SLEEP);
dnc->dnc_count = epb;
dnh = &dnc->dnc_children[0];
/* Initialize dnode slot status from dnode_phys_t */
for (int i = 0; i < epb; i++) {
zrl_init(&dnh[i].dnh_zrlock);
if (skip) {
skip--;
continue;
}
if (dn_block[i].dn_type != DMU_OT_NONE) {
int interior = dn_block[i].dn_extra_slots;
dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
dnode_set_slots(dnc, i + 1, interior,
DN_SLOT_INTERIOR);
skip = interior;
} else {
dnh[i].dnh_dnode = DN_SLOT_FREE;
skip = 0;
}
}
dmu_buf_init_user(&dnc->dnc_dbu, NULL,
dnode_buf_evict_async, NULL);
winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
if (winner != NULL) {
for (int i = 0; i < epb; i++)
zrl_destroy(&dnh[i].dnh_zrlock);
kmem_free(dnc, sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t));
dnc = winner;
}
}
ASSERT(dnc->dnc_count == epb);
if (flag & DNODE_MUST_BE_ALLOCATED) {
slots = 1;
dnode_slots_hold(dnc, idx, slots);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
DNODE_STAT_BUMP(dnode_hold_alloc_interior);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
DNODE_STAT_BUMP(dnode_hold_alloc_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
} else {
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
kpreempt(KPREEMPT_SYNC);
}
/*
* Someone else won the race and called dnode_create()
* after we checked DN_SLOT_IS_PTR() above but before
* we acquired the lock.
*/
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
dmu_buf_add_user_size(&db->db,
sizeof (dnode_t));
}
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
DNODE_STAT_BUMP(dnode_hold_alloc_hits);
} else if (flag & DNODE_MUST_BE_FREE) {
if (idx + slots - 1 >= DNODES_PER_BLOCK) {
DNODE_STAT_BUMP(dnode_hold_free_overflow);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_hold(dnc, idx, slots);
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
kpreempt(KPREEMPT_SYNC);
}
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
/*
* Allocated but otherwise free dnodes which would
* be in the interior of a multi-slot dnode need
* to be freed. Single slot dnodes can be safely
* re-purposed as a performance optimization.
*/
if (slots > 1) {
uint_t reclaimed =
dnode_reclaim_slots(dnc, idx + 1, slots - 1);
if (reclaimed > 0)
dmu_buf_sub_user_size(&db->db,
reclaimed * sizeof (dnode_t));
}
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
dmu_buf_add_user_size(&db->db, sizeof (dnode_t));
}
mutex_enter(&dn->dn_mtx);
if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
DNODE_STAT_BUMP(dnode_hold_free_hits);
} else {
dbuf_rele(db, FTAG);
return (SET_ERROR(EINVAL));
}
ASSERT0(dn->dn_free_txg);
if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);
mutex_exit(&dn->dn_mtx);
/* Now we can rely on the hold to prevent the dnode from moving. */
dnode_slots_rele(dnc, idx, slots);
DNODE_VERIFY(dn);
ASSERT3P(dnp, !=, NULL);
ASSERT3P(dn->dn_dbuf, ==, db);
ASSERT3U(dn->dn_object, ==, object);
dbuf_rele(db, FTAG);
*dnp = dn;
return (0);
}
/*
* Return a held dnode if the object is allocated, or an error (e.g. ENOENT)
* if it is not.
*/
int
dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
}
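/*
 * Illustrative sketch (editorial addition, compiled out): the usual
 * hold/release discipline around dnode_hold().  The helper name and the
 * error handling are assumptions for the example only.
 */
#if 0
static int
example_with_dnode_held(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	int err;

	/* Fails with e.g. ENOENT if the object is not allocated. */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	/* The hold keeps the dnode from being evicted or moved. */
	/* ... inspect or dirty the dnode here ... */

	dnode_rele(dn, FTAG);
	return (0);
}
#endif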
/*
* Can only add a reference if there is already at least one
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
boolean_t
dnode_add_ref(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
void
dnode_rele(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
}
void
dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;
refs = zfs_refcount_remove(&dn->dn_holds, tag);
if (refs == 0)
cv_broadcast(&dn->dn_nodnholds);
mutex_exit(&dn->dn_mtx);
/* dnode could get destroyed at this point, so don't use it anymore */
/*
* It's unsafe to release the last hold on a dnode by dnode_rele() or
* indirectly by dbuf_rele() while relying on the dnode handle to
* prevent the dnode from moving, since releasing the last hold could
* result in the dnode's parent dbuf evicting its dnode handles. For
* that reason anyone calling dnode_rele() or dbuf_rele() without some
* other direct or indirect hold on the dnode must first drop the dnode
* handle.
*/
#ifdef ZFS_DEBUG
ASSERT(refs > 0 || zrl_owner(&dnh->dnh_zrlock) != curthread);
#endif
/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
if (refs == 0 && db != NULL) {
/*
* Another thread could add a hold to the dnode handle in
* dnode_hold_impl() while holding the parent dbuf. Since the
* hold on the parent dbuf prevents the handle from being
* destroyed, the hold on the handle is OK. We can't yet assert
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, dnh, evicting);
}
}
/*
* Test whether we can create a dnode at the specified location.
*/
int
dnode_try_claim(objset_t *os, uint64_t object, int slots)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
slots, NULL, NULL));
}
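/*
 * Illustrative sketch (editorial addition, compiled out): using
 * dnode_try_claim() to probe whether a dnode spanning "slots" slots could
 * be created at "object".  Because DNODE_DRY_RUN is passed internally, no
 * hold is taken; zero means the claim would succeed, while a nonzero error
 * (e.g. ENOSPC or EEXIST) means it would not.  The helper name is an
 * assumption for the example only.
 */
#if 0
static boolean_t
example_can_claim(objset_t *os, uint64_t object, int slots)
{
	return (dnode_try_claim(os, object, slots) == 0);
}
#endif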
/*
* Checks if the dnode itself is dirty, or is carrying any uncommitted records.
* It is important to check both conditions, as some operations (e.g. appending
* to a file) can dirty both as a single logical unit, but they are not synced
* out atomically, so checking one and not the other can result in an object
* appearing to be clean mid-way through a commit.
*
* Do not change this lightly! If you get it wrong, dmu_offset_next() can
* detect a hole where there is really data, leading to silent corruption.
*/
boolean_t
dnode_is_dirty(dnode_t *dn)
{
mutex_enter(&dn->dn_mtx);
for (int i = 0; i < TXG_SIZE; i++) {
if (multilist_link_active(&dn->dn_dirty_link[i]) ||
!list_is_empty(&dn->dn_dirty_records[i])) {
mutex_exit(&dn->dn_mtx);
return (B_TRUE);
}
}
mutex_exit(&dn->dn_mtx);
return (B_FALSE);
}
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
uint64_t txg = tx->tx_txg;
if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
dsl_dataset_dirty(os->os_dsl_dataset, tx);
return;
}
DNODE_VERIFY(dn);
#ifdef ZFS_DEBUG
mutex_enter(&dn->dn_mtx);
ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
mutex_exit(&dn->dn_mtx);
#endif
/*
* Determine old uid/gid when necessary
*/
dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
multilist_t *dirtylist = &os->os_dirty_dnodes[txg & TXG_MASK];
multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
/*
* If we are already marked dirty, we're done.
*/
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
multilist_sublist_unlock(mls);
return;
}
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
(u_longlong_t)dn->dn_object, (u_longlong_t)txg);
multilist_sublist_insert_head(mls, dn);
multilist_sublist_unlock(mls);
/*
* The dnode maintains a hold on its containing dbuf as
* long as there are holds on it. Each instantiated child
* dbuf maintains a hold on the dnode. When the last child
* drops its hold, the dnode will drop its hold on the
* containing dbuf. We add a "dirty hold" here so that the
* dnode will hang around after we finish processing its
* children.
*/
VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
(void) dbuf_dirty(dn->dn_dbuf, tx);
dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
mutex_exit(&dn->dn_mtx);
return;
}
dn->dn_free_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
}
/*
* Try to change the block size for the indicated dnode. This can only
* succeed if there are no blocks allocated or dirty beyond the first block.
*/
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int err;
ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (size == 0)
size = SPA_MINBLOCKSIZE;
else
size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
if (ibs == dn->dn_indblkshift)
ibs = 0;
if (size == dn->dn_datablksz && ibs == 0)
return (0);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* Check for any allocated blocks beyond the first */
if (dn->dn_maxblkid != 0)
goto fail;
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL;
db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_exit(&dn->dn_dbufs_mtx);
goto fail;
}
}
mutex_exit(&dn->dn_dbufs_mtx);
if (ibs && dn->dn_nlevels != 1)
goto fail;
dnode_setdirty(dn, tx);
if (size != dn->dn_datablksz) {
/* resize the old block */
err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
if (err == 0) {
dbuf_new_size(db, size, tx);
} else if (err != ENOENT) {
goto fail;
}
dnode_setdblksz(dn, size);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
if (db)
dbuf_rele(db, FTAG);
}
if (ibs) {
dn->dn_indblkshift = ibs;
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
}
rw_exit(&dn->dn_struct_rwlock);
return (0);
fail:
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(ENOTSUP));
}
static void
dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
{
uint64_t txgoff = tx->tx_txg & TXG_MASK;
int old_nlevels = dn->dn_nlevels;
dmu_buf_impl_t *db;
list_t *list;
dbuf_dirty_record_t *new, *dr, *dr_next;
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT3U(new_nlevels, >, dn->dn_nlevels);
dn->dn_nlevels = new_nlevels;
ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
dn->dn_next_nlevels[txgoff] = new_nlevels;
/* dirty the left indirects */
db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
ASSERT(db != NULL);
new = dbuf_dirty(db, tx);
dbuf_rele(db, FTAG);
/* transfer the dirty records to the new indirect */
mutex_enter(&dn->dn_mtx);
mutex_enter(&new->dt.di.dr_mtx);
list = &dn->dn_dirty_records[txgoff];
for (dr = list_head(list); dr; dr = dr_next) {
dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
IMPLY(dr->dr_dbuf == NULL, old_nlevels == 1);
if (dr->dr_dbuf == NULL ||
(dr->dr_dbuf->db_level == old_nlevels - 1 &&
dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID)) {
list_remove(&dn->dn_dirty_records[txgoff], dr);
list_insert_tail(&new->dt.di.dr_children, dr);
dr->dr_parent = new;
}
}
mutex_exit(&new->dt.di.dr_mtx);
mutex_exit(&dn->dn_mtx);
}
int
dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
{
int ret = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_nlevels == nlevels) {
ret = 0;
goto out;
} else if (nlevels < dn->dn_nlevels) {
ret = SET_ERROR(EINVAL);
goto out;
}
dnode_set_nlevels_impl(dn, nlevels, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
return (ret);
}
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
boolean_t force)
{
int epbs, new_nlevels;
uint64_t sz;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(have_read ?
RW_READ_HELD(&dn->dn_struct_rwlock) :
RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* if we have a read-lock, check to see if we need to do any work
* before upgrading to a write-lock.
*/
if (have_read) {
if (blkid <= dn->dn_maxblkid)
return;
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
}
/*
* Raw sends (indicated by the force flag) require that we take the
* given blkid even if the value is lower than the current value.
*/
if (!force && blkid <= dn->dn_maxblkid)
goto out;
/*
* We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
* to indicate that this field is set. This allows us to set the
* maxblkid to 0 on an existing object in dnode_sync().
*/
dn->dn_maxblkid = blkid;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
blkid | DMU_NEXT_MAXBLKID_SET;
/*
* Compute the number of levels necessary to support the new maxblkid.
* Raw sends will ensure nlevels is set correctly for us.
*/
new_nlevels = 1;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (sz = dn->dn_nblkptr;
sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
new_nlevels++;
ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
if (!force) {
if (new_nlevels > dn->dn_nlevels)
dnode_set_nlevels_impl(dn, new_nlevels, tx);
} else {
ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
}
out:
if (have_read)
rw_downgrade(&dn->dn_struct_rwlock);
}
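/*
 * Illustrative sketch (not part of the change above): dnode_new_blkid()
 * stores the new maxblkid with the otherwise-unused top bit set so that
 * dnode_sync() can tell "maxblkid was set to 0" apart from "not set at
 * all", then strips the bit before writing dn_maxblkid.  The standalone
 * user-space example below mimics that encoding; NEXT_MAXBLKID_SET is a
 * hypothetical stand-in for DMU_NEXT_MAXBLKID_SET.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	NEXT_MAXBLKID_SET	(1ULL << 63)

int
main(void)
{
	uint64_t next_maxblkid = 0;	/* 0 means "no new maxblkid recorded" */
	uint64_t blkid = 0;		/* the new maxblkid may itself be 0 */

	/* open context: record the new value and flag it as present */
	next_maxblkid = blkid | NEXT_MAXBLKID_SET;

	/* syncing context: detect the flag, then mask it off */
	if (next_maxblkid != 0) {
		uint64_t synced = next_maxblkid & ~NEXT_MAXBLKID_SET;
		assert(synced == blkid);
		printf("synced maxblkid = %llu\n", (unsigned long long)synced);
	}
	return (0);
}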
static void
dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
if (db != NULL) {
dmu_buf_will_dirty(&db->db, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Dirty all the in-core level-1 dbufs in the range specified by start_blkid
* and end_blkid.
*/
static void
dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db;
avl_index_t where;
db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
for (;;) {
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
if (db == NULL || db->db_level != 1 ||
db->db_blkid >= end_blkid) {
break;
}
/*
* Set up the next blkid we want to search for.
*/
db_search->db_blkid = db->db_blkid + 1;
ASSERT3U(db->db_blkid, >=, start_blkid);
/*
* If the dbuf transitions to DB_EVICTING while we're trying
* to dirty it, then we will be unable to discover it in
* the dbuf hash table. This will result in a call to
* dbuf_create() which needs to acquire the dn_dbufs_mtx
* lock. To avoid a deadlock, we drop the lock before
* dirtying the level-1 dbuf.
*/
mutex_exit(&dn->dn_dbufs_mtx);
dnode_dirty_l1(dn, db->db_blkid, tx);
mutex_enter(&dn->dn_dbufs_mtx);
}
#ifdef ZFS_DEBUG
/*
* Walk all the in-core level-1 dbufs and verify they have been dirtied.
*/
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_level != 1 || db->db_blkid >= end_blkid)
break;
if (db->db_state != DB_EVICTING)
ASSERT(db->db_dirtycnt > 0);
}
#endif
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
void
dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
{
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
* initialize the objset.
*/
if (dn->dn_dirtyctx == DN_UNDIRTIED) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
if (ds != NULL) {
rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
}
if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
if (dmu_tx_is_syncing(tx))
dn->dn_dirtyctx = DN_DIRTY_SYNC;
else
dn->dn_dirtyctx = DN_DIRTY_OPEN;
dn->dn_dirtyctx_firstset = tag;
}
if (ds != NULL) {
rrw_exit(&ds->ds_bp_rwlock, tag);
}
}
}
static void
dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int res;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off), TRUE, FALSE,
FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
db_lock_type_t dblt;
boolean_t dirty;
dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
/* don't dirty if not on disk and not dirty */
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, dblt, FTAG);
if (dirty) {
caddr_t data;
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
memset(data + blkoff, 0, len);
}
dbuf_rele(db, FTAG);
}
}
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
uint64_t blkoff, blkid, nblks;
int blksz, blkshift, head, tail;
int trunc = FALSE;
int epbs;
blksz = dn->dn_datablksz;
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
/*
* First, block align the region to free:
*/
if (ISP2(blksz)) {
head = P2NPHASE(off, blksz);
blkoff = P2PHASE(off, blksz);
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
ASSERT(dn->dn_maxblkid == 0);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
*/
blkid = 0;
nblks = 1;
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_dirty_l1(dn, 0, tx);
rw_exit(&dn->dn_struct_rwlock);
}
goto done;
} else if (off >= blksz) {
/* Freeing past end-of-data */
return;
} else {
/* Freeing part of the block. */
head = blksz - off;
ASSERT3U(head, >, 0);
}
blkoff = off;
}
/* zero out any partial block data at the start of the range */
if (head) {
ASSERT3U(blkoff + head, ==, blksz);
if (len < head)
head = len;
dnode_partial_zero(dn, off, blkoff, head, tx);
off += head;
len -= head;
}
/* If the range was less than one block, we're done */
if (len == 0)
return;
/* If the remaining range is past end of file, we're done */
if ((off >> blkshift) > dn->dn_maxblkid)
return;
ASSERT(ISP2(blksz));
if (trunc)
tail = 0;
else
tail = P2PHASE(len, blksz);
ASSERT0(P2PHASE(off, blksz));
/* zero out any partial block data at the end of the range */
if (tail) {
if (len < tail)
tail = len;
dnode_partial_zero(dn, off + len, 0, tail, tx);
len -= tail;
}
/* If the range did not include a full block, we are done */
if (len == 0)
return;
ASSERT(IS_P2ALIGNED(off, blksz));
ASSERT(trunc || IS_P2ALIGNED(len, blksz));
blkid = off >> blkshift;
nblks = len >> blkshift;
if (trunc)
nblks += 1;
/*
* Dirty all the indirect blocks in this range. Note that only
* the first and last indirect blocks can actually be written
* (if they were partially freed) -- they must be dirtied, even if
* they do not exist on disk yet. The interior blocks will
* be freed by free_children(), so they will not actually be written.
* Even though these interior blocks will not be written, we
* dirty them for two reasons:
*
* - It ensures that the indirect blocks remain in memory until
* syncing context. (They have already been prefetched by
* dmu_tx_hold_free(), so we don't have to worry about reading
* them serially here.)
*
* - The dirty space accounting will put pressure on the txg sync
* mechanism to begin syncing, and to delay transactions if there
* is a large amount of freeing. Even though these indirect
* blocks will not be written, we could need to write the same
* amount of space if we copy the freed BPs into deadlists.
*/
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
uint64_t first, last;
first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx);
if (trunc)
last = dn->dn_maxblkid >> epbs;
else
last = (blkid + nblks - 1) >> epbs;
if (last != first)
dnode_dirty_l1(dn, last, tx);
dnode_dirty_l1range(dn, first, last, tx);
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
for (uint64_t i = first + 1; i < last; i++) {
/*
* Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of
* level-0-equivalent bytes.
*/
uint64_t ibyte = i << shift;
int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0);
i = ibyte >> shift;
if (i >= last)
break;
/*
* Normally we should not see an error, either
* from dnode_next_offset() or dbuf_hold_level()
* (except for ESRCH from dnode_next_offset).
* If there is an i/o error, then when we read
* this block in syncing context, it will use
* ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
* to the "failmode" property. dnode_next_offset()
* doesn't have a flag to indicate MUSTSUCCEED.
*/
if (err != 0)
break;
dnode_dirty_l1(dn, i, tx);
}
rw_exit(&dn->dn_struct_rwlock);
}
done:
/*
* Add this range to the dnode range list.
* We will finish up this free operation in the syncing phase.
*/
mutex_enter(&dn->dn_mtx);
{
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] == NULL) {
- dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
- RANGE_SEG64, NULL, 0, 0);
+ dn->dn_free_ranges[txgoff] = zfs_range_tree_create(NULL,
+ ZFS_RANGE_SEG64, NULL, 0, 0);
}
- range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
- range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
+ zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
+ zfs_range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
(u_longlong_t)blkid, (u_longlong_t)nblks,
(u_longlong_t)tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
dnode_setdirty(dn, tx);
}
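/*
 * Illustrative sketch (not part of the change above): the head/tail
 * split performed at the top of dnode_free_range() for a power-of-two
 * block size.  P2PHASE/P2NPHASE are re-defined locally so the example
 * builds in user space; the input values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define	P2PHASE(x, align)	((x) & ((align) - 1))
#define	P2NPHASE(x, align)	(-(x) & ((align) - 1))

int
main(void)
{
	uint64_t blksz = 131072;		/* 128K data blocks */
	uint64_t off = 100000, len = 400000;	/* byte range to free */

	/* partial block at the start: bytes up to the next block boundary */
	uint64_t head = P2NPHASE(off, blksz);
	if (head > len)
		head = len;
	off += head;
	len -= head;

	/* partial block at the end: bytes past the last block boundary */
	uint64_t tail = P2PHASE(len, blksz);
	len -= tail;

	printf("head=%llu bytes, %llu whole blocks, tail=%llu bytes\n",
	    (unsigned long long)head, (unsigned long long)(len / blksz),
	    (unsigned long long)tail);
	return (0);
}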
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
int i;
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
int i;
if (blkid == DMU_BONUS_BLKID)
return (FALSE);
if (dn->dn_free_txg)
return (TRUE);
if (blkid == DMU_SPILL_BLKID)
return (dnode_spill_freed(dn));
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_free_ranges[i] != NULL &&
- range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
+ zfs_range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
uint64_t space;
dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
dn, dn->dn_phys,
(u_longlong_t)dn->dn_phys->dn_used,
(longlong_t)delta);
mutex_enter(&dn->dn_mtx);
space = DN_USED_BYTES(dn->dn_phys);
if (delta > 0) {
ASSERT3U(space + delta, >=, space); /* no overflow */
} else {
ASSERT3U(space, >=, -delta); /* no underflow */
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
dn->dn_phys->dn_used = space;
dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
}
mutex_exit(&dn->dn_mtx);
}
/*
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
* we find something (i.e., we don't return ESRCH) and then search back
* down the tree (narrow the search) until we reach our original search
* level.
*/
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
int lvl, uint64_t blkfill, uint64_t txg)
{
dmu_buf_impl_t *db = NULL;
void *data = NULL;
uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
uint64_t epb = 1ULL << epbs;
uint64_t minfill, maxfill;
boolean_t hole;
int i, inc, error, span;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
hole = ((flags & DNODE_FIND_HOLE) != 0);
inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
ASSERT(txg == 0 || !hole);
if (lvl == dn->dn_phys->dn_nlevels) {
error = 0;
epb = dn->dn_phys->dn_nblkptr;
data = dn->dn_phys->dn_blkptr;
} else {
uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
if (error) {
if (error != ENOENT)
return (error);
if (hole)
return (0);
/*
* This can only happen when we are searching up
* the block tree for data. We don't really need to
* adjust the offset, as we will just end up looking
* at the pointer to this block in its parent, and it's
* going to be unallocated, so we will skip over it.
*/
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL,
DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (error) {
dbuf_rele(db, FTAG);
return (error);
}
data = db->db.db_data;
rw_enter(&db->db_rwlock, RW_READER);
}
if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
BP_GET_LOGICAL_BIRTH(db->db_blkptr) <= txg ||
BP_IS_HOLE(db->db_blkptr))) {
/*
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
ASSERT(dn->dn_type == DMU_OT_DNODE);
ASSERT(!(flags & DNODE_FIND_BACKWARDS));
for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
i < blkfill; i += dnp[i].dn_extra_slots + 1) {
if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
break;
}
if (i == blkfill)
error = SET_ERROR(ESRCH);
*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
(i << DNODE_SHIFT);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
span = (lvl - 1) * epbs + dn->dn_datablkshift;
minfill = 0;
maxfill = blkfill << ((lvl - 1) * epbs);
if (hole)
maxfill--;
else
minfill++;
if (span >= 8 * sizeof (*offset)) {
/* This only happens on the highest indirection level */
ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
*offset = 0;
} else {
*offset = *offset >> span;
}
for (i = BF64_GET(*offset, 0, epbs);
i >= 0 && i < epb; i += inc) {
if (BP_GET_FILL(&bp[i]) >= minfill &&
BP_GET_FILL(&bp[i]) <= maxfill &&
(hole || BP_GET_LOGICAL_BIRTH(&bp[i]) > txg))
break;
if (inc > 0 || *offset > 0)
*offset += inc;
}
if (span >= 8 * sizeof (*offset)) {
*offset = start;
} else {
*offset = *offset << span;
}
if (inc < 0) {
/* traversing backwards; position offset at the end */
if (span < 8 * sizeof (*offset))
*offset = MIN(*offset + (1ULL << span) - 1,
start);
} else if (*offset < start) {
*offset = start;
}
if (i < 0 || i >= epb)
error = SET_ERROR(ESRCH);
}
if (db != NULL) {
rw_exit(&db->db_rwlock);
dbuf_rele(db, FTAG);
}
return (error);
}
/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
* DNODES_PER_BLOCK for the meta dnode, and some fraction of
* DNODES_PER_BLOCK when searching for sparse regions thereof.
*
* Examples:
*
* dnode_next_offset(dn, flags, offset, 1, 1, 0);
* Finds the next/previous hole/data in a file.
* Used in dmu_offset_next().
*
* dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
* Finds the next free/allocated dnode in an objset's meta-dnode.
* Only finds objects that have new contents since txg (i.e.
* bonus buffer changes and content removal are ignored).
* Used in dmu_object_next().
*
* dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
* Finds the next L2 meta-dnode bp that's at most 1/4 full.
* Used in dmu_object_alloc().
*/
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
uint64_t initial_offset = *offset;
int lvl, maxlvl;
int error = 0;
if (!(flags & DNODE_FIND_HAVELOCK))
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = SET_ERROR(ESRCH);
goto out;
}
if (dn->dn_datablkshift == 0) {
if (*offset < dn->dn_datablksz) {
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = SET_ERROR(ESRCH);
}
goto out;
}
maxlvl = dn->dn_phys->dn_nlevels;
for (lvl = minlvl; lvl <= maxlvl; lvl++) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
if (error != ESRCH)
break;
}
while (error == 0 && --lvl >= minlvl) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
}
/*
* There's always a "virtual hole" at the end of the object, even
* if all BP's which physically exist are non-holes.
*/
if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
error = 0;
}
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
return (error);
}
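/*
 * Illustrative sketch (not part of the change above): the "widen, then
 * narrow" search that dnode_next_offset() performs over the block tree,
 * reduced to a two-level toy.  A level-1 entry summarizes GROUP level-0
 * slots the way an indirect block pointer's fill count summarizes its
 * children; the summary is only consulted for slots at or after the
 * starting offset, which sidesteps the partial-group case.
 */
#include <stdint.h>
#include <stdio.h>

#define	NSLOTS	32
#define	GROUP	8	/* level-0 slots covered by one level-1 entry */

static int
next_data(const uint8_t l0[NSLOTS], uint64_t *offset)
{
	uint64_t g;

	/* level 1: widen the search until some group holds data */
	for (g = *offset / GROUP; g < NSLOTS / GROUP; g++) {
		uint64_t first = (g == *offset / GROUP) ?
		    *offset % GROUP : 0;
		int any = 0;
		for (uint64_t i = first; i < GROUP; i++)
			any |= l0[g * GROUP + i];
		if (any)
			break;
	}
	if (g == NSLOTS / GROUP)
		return (-1);			/* analogue of ESRCH */

	/* level 0: narrow back down to the exact slot in that group */
	uint64_t start = (g * GROUP > *offset) ? g * GROUP : *offset;
	for (uint64_t i = start; i < (g + 1) * GROUP; i++) {
		if (l0[i] != 0) {
			*offset = i;
			return (0);
		}
	}
	return (-1);
}

int
main(void)
{
	uint8_t l0[NSLOTS] = { 0 };
	uint64_t off = 3;

	l0[21] = 1;			/* only slot 21 holds data */
	if (next_data(l0, &off) == 0)
		printf("next data at slot %llu\n", (unsigned long long)off);
	return (0);
}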
#if defined(_KERNEL)
EXPORT_SYMBOL(dnode_hold);
EXPORT_SYMBOL(dnode_rele);
EXPORT_SYMBOL(dnode_set_nlevels);
EXPORT_SYMBOL(dnode_set_blksz);
EXPORT_SYMBOL(dnode_free_range);
EXPORT_SYMBOL(dnode_evict_dbufs);
EXPORT_SYMBOL(dnode_evict_bonus);
#endif
ZFS_MODULE_PARAM(zfs, zfs_, default_bs, INT, ZMOD_RW,
"Default dnode block shift");
ZFS_MODULE_PARAM(zfs, zfs_, default_ibs, INT, ZMOD_RW,
"Default dnode indirect block shift");
diff --git a/sys/contrib/openzfs/module/zfs/dnode_sync.c b/sys/contrib/openzfs/module/zfs/dnode_sync.c
index 122d7d0d17d8..c82f45145d4b 100644
--- a/sys/contrib/openzfs/module/zfs/dnode_sync.c
+++ b/sys/contrib/openzfs/module/zfs/dnode_sync.c
@@ -1,881 +1,882 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2020 Oxide Computer Company
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int txgoff = tx->tx_txg & TXG_MASK;
int nblkptr = dn->dn_phys->dn_nblkptr;
int old_toplvl = dn->dn_phys->dn_nlevels - 1;
int new_level = dn->dn_next_nlevels[txgoff];
int i;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* this dnode can't be paged out because it's dirty */
ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);
db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
ASSERT(db != NULL);
dn->dn_phys->dn_nlevels = new_level;
dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
(u_longlong_t)dn->dn_object, dn->dn_phys->dn_nlevels);
/*
* Lock ordering requires that we hold the children's db_mutexes (by
* calling dbuf_find()) before holding the parent's db_rwlock. The lock
* order is imposed by dbuf_read's steps of "grab the lock to protect
* db_parent, get db_parent, hold db_parent's db_rwlock".
*/
dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
for (i = 0; i < nblkptr; i++) {
children[i] = dbuf_find(dn->dn_objset, dn->dn_object,
old_toplvl, i, NULL);
}
/* transfer dnode's block pointers to new indirect block */
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
if (dn->dn_dbuf != NULL)
rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
rw_enter(&db->db_rwlock, RW_WRITER);
ASSERT(db->db.db_data);
ASSERT(arc_released(db->db_buf));
ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
memcpy(db->db.db_data, dn->dn_phys->dn_blkptr,
sizeof (blkptr_t) * nblkptr);
arc_buf_freeze(db->db_buf);
/* set dbuf's parent pointers to new indirect buf */
for (i = 0; i < nblkptr; i++) {
dmu_buf_impl_t *child = children[i];
if (child == NULL)
continue;
#ifdef ZFS_DEBUG
DB_DNODE_ENTER(child);
ASSERT3P(DB_DNODE(child), ==, dn);
DB_DNODE_EXIT(child);
#endif /* DEBUG */
if (child->db_parent && child->db_parent != dn->dn_dbuf) {
ASSERT(child->db_parent->db_level == db->db_level);
ASSERT(child->db_blkptr !=
&dn->dn_phys->dn_blkptr[child->db_blkid]);
mutex_exit(&child->db_mtx);
continue;
}
ASSERT(child->db_parent == NULL ||
child->db_parent == dn->dn_dbuf);
child->db_parent = db;
dbuf_add_ref(db, child);
if (db->db.db_data)
child->db_blkptr = (blkptr_t *)db->db.db_data + i;
else
child->db_blkptr = NULL;
dprintf_dbuf_bp(child, child->db_blkptr,
"changed db_blkptr to new indirect %s", "");
mutex_exit(&child->db_mtx);
}
memset(dn->dn_phys->dn_blkptr, 0, sizeof (blkptr_t) * nblkptr);
rw_exit(&db->db_rwlock);
if (dn->dn_dbuf != NULL)
rw_exit(&dn->dn_dbuf->db_rwlock);
dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
}
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
uint64_t bytesfreed = 0;
dprintf("ds=%p obj=%llx num=%d\n", ds, (u_longlong_t)dn->dn_object,
num);
for (int i = 0; i < num; i++, bp++) {
if (BP_IS_HOLE(bp))
continue;
bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
/*
* Save some useful information on the holes being
* punched, including logical size, type, and indirection
* level. Retaining birth time enables detection of when
* holes are punched for reducing the number of free
* records transmitted during a zfs send.
*/
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t type = BP_GET_TYPE(bp);
uint64_t lvl = BP_GET_LEVEL(bp);
memset(bp, 0, sizeof (blkptr_t));
if (spa_feature_is_active(dn->dn_objset->os_spa,
SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, type);
BP_SET_LEVEL(bp, lvl);
BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
}
}
dnode_diduse_space(dn, -bytesfreed);
}
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
uint64_t off, num, i, j;
unsigned int epbs;
int err;
uint64_t txg = tx->tx_txg;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
off = start - (db->db_blkid << epbs);
num = end - start + 1;
ASSERT3U(dn->dn_phys->dn_indblkshift, >=, SPA_BLKPTRSHIFT);
ASSERT3U(end + 1, >=, start);
ASSERT3U(start, >=, (db->db_blkid << epbs));
ASSERT3U(db->db_level, >, 0);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
ASSERT(db->db_blkptr != NULL);
for (i = off; i < off+num; i++) {
uint64_t *buf;
dmu_buf_impl_t *child;
dbuf_dirty_record_t *dr;
ASSERT(db->db_level == 1);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
err = dbuf_hold_impl(dn, db->db_level - 1,
(db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
rw_exit(&dn->dn_struct_rwlock);
if (err == ENOENT)
continue;
ASSERT(err == 0);
ASSERT(child->db_level == 0);
dr = dbuf_find_dirty_eq(child, txg);
/* data_old better be zeroed */
if (dr) {
buf = dr->dt.dl.dr_data->b_data;
for (j = 0; j < child->db.db_size >> 3; j++) {
if (buf[j] != 0) {
panic("freed data not zero: "
"child=%p i=%llu off=%llu "
"num=%llu\n",
(void *)child, (u_longlong_t)i,
(u_longlong_t)off,
(u_longlong_t)num);
}
}
}
/*
* db_data better be zeroed unless it's dirty in a
* future txg.
*/
mutex_enter(&child->db_mtx);
buf = child->db.db_data;
if (buf != NULL && child->db_state != DB_FILL &&
list_is_empty(&child->db_dirty_records)) {
for (j = 0; j < child->db.db_size >> 3; j++) {
if (buf[j] != 0) {
panic("freed data not zero: "
"child=%p i=%llu off=%llu "
"num=%llu\n",
(void *)child, (u_longlong_t)i,
(u_longlong_t)off,
(u_longlong_t)num);
}
}
}
mutex_exit(&child->db_mtx);
dbuf_rele(child, FTAG);
}
DB_DNODE_EXIT(db);
}
#endif
/*
* We don't usually free the indirect blocks here. If in one txg we have a
* free_range and a write to the same indirect block, it's important that we
* preserve the hole's birth times. Therefore, we don't free any indirect
* blocks in free_children(). If an indirect block happens to turn into all
* holes, it will be freed by dbuf_write_children_ready, which happens at a
* point in the syncing process where we know for certain the contents of the
* indirect block.
*
* However, if we're freeing a dnode, its space accounting must go to zero
* before we actually try to free the dnode, or we will trip an assertion. In
* addition, we know the case described above cannot occur, because the dnode is
* being freed. Therefore, we free the indirect blocks immediately in that
* case.
*/
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
boolean_t free_indirects, dmu_tx_t *tx)
{
dnode_t *dn;
blkptr_t *bp;
dmu_buf_impl_t *subdb;
uint64_t start, end, dbstart, dbend;
unsigned int epbs, shift, i;
/*
* There is a small possibility that this block will not be cached:
* 1 - if level > 1 and there are no children with level <= 1
* 2 - if this block was evicted since we read it from
* dmu_tx_hold_free().
*/
if (db->db_state != DB_CACHED)
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
/*
* If we modify this indirect block, and we are not freeing the
* dnode (!free_indirects), then this indirect block needs to get
* written to disk by dbuf_write(). If it is dirty, we know it will
* be written (otherwise, we would have incorrect on-disk state
* because the space would be freed but still referenced by the BP
* in this indirect block). Therefore we VERIFY that it is
* dirty.
*
* Our VERIFY covers some cases that do not actually have to be
* dirty, but the open-context code happens to dirty. E.g. if the
* blocks we are freeing are all holes, because in that case, we
* are only freeing part of this indirect block, so it is an
* ancestor of the first or last block to be freed. The first and
* last L1 indirect blocks are always dirtied by dnode_free_range().
*/
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
dmu_buf_unlock_parent(db, dblt, FTAG);
dbuf_release_bp(db);
bp = db->db.db_data;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
shift = (db->db_level - 1) * epbs;
dbstart = db->db_blkid << epbs;
start = blkid >> shift;
if (dbstart < start) {
bp += start - dbstart;
} else {
start = dbstart;
}
dbend = ((db->db_blkid + 1) << epbs) - 1;
end = (blkid + nblks - 1) >> shift;
if (dbend <= end)
end = dbend;
ASSERT3U(start, <=, end);
if (db->db_level == 1) {
FREE_VERIFY(db, start, end, tx);
rw_enter(&db->db_rwlock, RW_WRITER);
free_blocks(dn, bp, end - start + 1, tx);
rw_exit(&db->db_rwlock);
} else {
for (uint64_t id = start; id <= end; id++, bp++) {
if (BP_IS_HOLE(bp))
continue;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
id, TRUE, FALSE, FTAG, &subdb));
rw_exit(&dn->dn_struct_rwlock);
ASSERT3P(bp, ==, subdb->db_blkptr);
free_children(subdb, blkid, nblks, free_indirects, tx);
dbuf_rele(subdb, FTAG);
}
}
if (free_indirects) {
rw_enter(&db->db_rwlock, RW_WRITER);
for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
ASSERT(BP_IS_HOLE(bp));
memset(db->db.db_data, 0, db->db.db_size);
free_blocks(dn, db->db_blkptr, 1, tx);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
arc_buf_freeze(db->db_buf);
}
/*
* Traverse the indicated range of the provided file
* and "free" all the blocks contained there.
*/
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
boolean_t free_indirects, dmu_tx_t *tx)
{
blkptr_t *bp = dn->dn_phys->dn_blkptr;
int dnlevel = dn->dn_phys->dn_nlevels;
boolean_t trunc = B_FALSE;
if (blkid > dn->dn_phys->dn_maxblkid)
return;
ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
trunc = B_TRUE;
}
/* There are no indirect blocks in the object */
if (dnlevel == 1) {
if (blkid >= dn->dn_phys->dn_nblkptr) {
/* this range was never made persistent */
return;
}
ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
free_blocks(dn, bp + blkid, nblks, tx);
} else {
int shift = (dnlevel - 1) *
(dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
int start = blkid >> shift;
int end = (blkid + nblks - 1) >> shift;
dmu_buf_impl_t *db;
ASSERT(start < dn->dn_phys->dn_nblkptr);
bp += start;
for (int i = start; i <= end; i++, bp++) {
if (BP_IS_HOLE(bp))
continue;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
TRUE, FALSE, FTAG, &db));
rw_exit(&dn->dn_struct_rwlock);
free_children(db, blkid, nblks, free_indirects, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Do not truncate the maxblkid if we are performing a raw
* receive. The raw receive sets the maxblkid manually and
* must not be overridden. Usually, the last DRR_FREE record
* will be at the maxblkid, because the source system sets
* the maxblkid when truncating. However, if the last block
* was freed by overwriting with zeros and being compressed
* away to a hole, the source system will generate a DRR_FREE
* record while leaving the maxblkid after the end of that
* record. In this case we need to leave the maxblkid as
* indicated in the DRR_OBJECT record, so that it matches the
* source system, ensuring that the cryptographic hashes will
* match.
*/
if (trunc && !dn->dn_objset->os_raw_receive) {
uint64_t off __maybe_unused;
dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;
off = (dn->dn_phys->dn_maxblkid + 1) *
(dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT(off < dn->dn_phys->dn_maxblkid ||
dn->dn_phys->dn_maxblkid == 0 ||
dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
}
}
typedef struct dnode_sync_free_range_arg {
dnode_t *dsfra_dnode;
dmu_tx_t *dsfra_tx;
boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
dnode_sync_free_range_arg_t *dsfra = arg;
dnode_t *dn = dsfra->dsfra_dnode;
mutex_exit(&dn->dn_mtx);
dnode_sync_free_range_impl(dn, blkid, nblks,
dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
mutex_enter(&dn->dn_mtx);
}
/*
* Try to kick all the dnode's dbufs out of the cache...
*/
void
dnode_evict_dbufs(dnode_t *dn)
{
dmu_buf_impl_t *db_marker;
dmu_buf_impl_t *db, *db_next;
db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {
#ifdef ZFS_DEBUG
DB_DNODE_ENTER(db);
ASSERT3P(DB_DNODE(db), ==, dn);
DB_DNODE_EXIT(db);
#endif /* DEBUG */
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING &&
zfs_refcount_is_zero(&db->db_holds)) {
db_marker->db_level = db->db_level;
db_marker->db_blkid = db->db_blkid;
/*
* Insert a MARKER node with the same level and blkid.
* And to resolve any ties in dbuf_compare() use the
* pointer of the dbuf that we are evicting. Pass the
* address in db_parent.
*/
db_marker->db_state = DB_MARKER;
db_marker->db_parent = (void *)((uintptr_t)db - 1);
avl_insert_here(&dn->dn_dbufs, db_marker, db,
AVL_BEFORE);
/*
* We need to use the "marker" dbuf rather than
* simply getting the next dbuf, because
* dbuf_destroy() may actually remove multiple dbufs.
* It can call itself recursively on the parent dbuf,
* which may also be removed from dn_dbufs. The code
* flow would look like:
*
* dbuf_destroy():
* dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
* if (!cacheable || pending_evict)
* dbuf_destroy()
*/
dbuf_destroy(db);
db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
avl_remove(&dn->dn_dbufs, db_marker);
} else {
db->db_pending_evict = TRUE;
mutex_exit(&db->db_mtx);
db_next = AVL_NEXT(&dn->dn_dbufs, db);
}
}
mutex_exit(&dn->dn_dbufs_mtx);
kmem_free(db_marker, sizeof (dmu_buf_impl_t));
dnode_evict_bonus(dn);
}
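/*
 * Illustrative sketch (not part of the change above): why
 * dnode_evict_dbufs() parks a marker node before the dbuf it destroys.
 * Destroying an element may also remove other elements (its parent), so
 * a pre-fetched "next" pointer could dangle; resuming from the marker is
 * always safe.  The TAILQ list and node layout below are hypothetical
 * stand-ins for the AVL tree of dbufs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	TAILQ_ENTRY(node) link;
	int id;
	int is_marker;		/* mirrors the DB_MARKER state check */
	struct node *parent;	/* destroyed together with this node */
};
TAILQ_HEAD(nodelist, node);

static void
destroy_node(struct nodelist *list, struct node *n)
{
	/* destroying a node can recursively take its parent with it */
	if (n->parent != NULL) {
		TAILQ_REMOVE(list, n->parent, link);
		free(n->parent);
	}
	TAILQ_REMOVE(list, n, link);
	free(n);
}

int
main(void)
{
	struct nodelist list = TAILQ_HEAD_INITIALIZER(list);
	struct node *n, *next, marker = { .is_marker = 1 };
	struct node *nodes[4];

	/* build 0..3; destroying node 2 also removes node 3 (its parent) */
	for (int i = 0; i < 4; i++) {
		nodes[i] = calloc(1, sizeof (struct node));
		nodes[i]->id = i;
		TAILQ_INSERT_TAIL(&list, nodes[i], link);
	}
	nodes[2]->parent = nodes[3];

	for (n = TAILQ_FIRST(&list); n != NULL; n = next) {
		if (n->is_marker) {
			next = TAILQ_NEXT(n, link);
			continue;
		}
		int id = n->id;
		/* park the marker, destroy n, then resume after the marker */
		TAILQ_INSERT_BEFORE(n, &marker, link);
		destroy_node(&list, n);
		next = TAILQ_NEXT(&marker, link);
		TAILQ_REMOVE(&list, &marker, link);
		printf("evicted %d\n", id);
	}
	return (0);
}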
void
dnode_evict_bonus(dnode_t *dn)
{
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus != NULL) {
if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
} else {
dn->dn_bonus->db_pending_evict = TRUE;
}
}
rw_exit(&dn->dn_struct_rwlock);
}
static void
dnode_undirty_dbufs(list_t *list)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
dmu_buf_impl_t *db = dr->dr_dbuf;
uint64_t txg = dr->dr_txg;
if (db->db_level != 0)
dnode_undirty_dbufs(&dr->dt.di.dr_children);
mutex_enter(&db->db_mtx);
/* XXX - use dbuf_undirty()? */
list_remove(list, dr);
ASSERT(list_head(&db->db_dirty_records) == dr);
list_remove_head(&db->db_dirty_records);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_dirtycnt -= 1;
if (db->db_level == 0) {
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_data == db->db_buf);
dbuf_unoverride(dr);
} else {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_cache_free(dbuf_dirty_kmem_cache, dr);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
}
}
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
int txgoff = tx->tx_txg & TXG_MASK;
ASSERT(dmu_tx_is_syncing(tx));
/*
* Our contents should have been freed in dnode_sync() by the
* free range record inserted by the caller of dnode_free().
*/
ASSERT0(DN_USED_BYTES(dn->dn_phys));
ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));
dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
dnode_evict_dbufs(dn);
/*
* XXX - It would be nice to assert this, but we may still
* have residual holds from async evictions from the arc...
*
* zfs_obj_to_path() also depends on this being
* commented out.
*
* ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
*/
/* Undirty next bits */
dn->dn_next_nlevels[txgoff] = 0;
dn->dn_next_indblkshift[txgoff] = 0;
dn->dn_next_blksz[txgoff] = 0;
dn->dn_next_maxblkid[txgoff] = 0;
/* ASSERT(blkptrs are zero); */
ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
ASSERT(dn->dn_type != DMU_OT_NONE);
ASSERT(dn->dn_free_txg > 0);
if (dn->dn_allocated_txg != dn->dn_free_txg)
dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
memset(dn->dn_phys, 0, sizeof (dnode_phys_t) * dn->dn_num_slots);
dnode_free_interior_slots(dn);
mutex_enter(&dn->dn_mtx);
dn->dn_type = DMU_OT_NONE;
dn->dn_maxblkid = 0;
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_have_spill = B_FALSE;
dn->dn_num_slots = 1;
mutex_exit(&dn->dn_mtx);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
/*
* Now that we've released our hold, the dnode may
* be evicted, so we mustn't access it.
*/
}
/*
* Write out the dnode's dirty buffers.
* Does not wait for zio completions.
*/
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
dnode_phys_t *dnp = dn->dn_phys;
int txgoff = tx->tx_txg & TXG_MASK;
list_t *list = &dn->dn_dirty_records[txgoff];
static const dnode_phys_t zerodn __maybe_unused = { 0 };
boolean_t kill_spill = B_FALSE;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
ASSERT(dnp->dn_type != DMU_OT_NONE ||
memcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
DNODE_VERIFY(dn);
ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
/*
* Do user accounting if it is enabled and this is not
* an encrypted receive.
*/
if (dmu_objset_userused_enabled(os) &&
!DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
mutex_enter(&dn->dn_mtx);
dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
dn->dn_oldflags = dn->dn_phys->dn_flags;
dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
if (dmu_objset_userobjused_enabled(dn->dn_objset))
dn->dn_phys->dn_flags |=
DNODE_FLAG_USEROBJUSED_ACCOUNTED;
mutex_exit(&dn->dn_mtx);
dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
} else if (!(os->os_encrypted && dmu_objset_is_receiving(os))) {
/*
* Once we account for it, we should always account for it,
* except for the case of a raw receive. We will not be able
* to account for it until the receiving dataset has been
* mounted.
*/
ASSERT(!(dn->dn_phys->dn_flags &
DNODE_FLAG_USERUSED_ACCOUNTED));
ASSERT(!(dn->dn_phys->dn_flags &
DNODE_FLAG_USEROBJUSED_ACCOUNTED));
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_allocated_txg == tx->tx_txg) {
/* The dnode is newly allocated or reallocated */
if (dnp->dn_type == DMU_OT_NONE) {
/* this is a first alloc, not a realloc */
dnp->dn_nlevels = 1;
dnp->dn_nblkptr = dn->dn_nblkptr;
}
dnp->dn_type = dn->dn_type;
dnp->dn_bonustype = dn->dn_bonustype;
dnp->dn_bonuslen = dn->dn_bonuslen;
}
dnp->dn_extra_slots = dn->dn_num_slots - 1;
ASSERT(dnp->dn_nlevels > 1 ||
BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT(dnp->dn_nlevels < 2 ||
BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);
if (dn->dn_next_type[txgoff] != 0) {
dnp->dn_type = dn->dn_type;
dn->dn_next_type[txgoff] = 0;
}
if (dn->dn_next_blksz[txgoff] != 0) {
ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
SPA_MINBLOCKSIZE) == 0);
ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
dn->dn_maxblkid == 0 || list_head(list) != NULL ||
dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
dnp->dn_datablkszsec ||
- !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
+ !zfs_range_tree_is_empty(dn->dn_free_ranges[txgoff]));
dnp->dn_datablkszsec =
dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
dn->dn_next_blksz[txgoff] = 0;
}
if (dn->dn_next_bonuslen[txgoff] != 0) {
if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
dnp->dn_bonuslen = 0;
else
dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
ASSERT(dnp->dn_bonuslen <=
DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
dn->dn_next_bonuslen[txgoff] = 0;
}
if (dn->dn_next_bonustype[txgoff] != 0) {
ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
dn->dn_next_bonustype[txgoff] = 0;
}
boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
dn->dn_free_txg <= tx->tx_txg;
/*
* Remove the spill block if we have been explicitly asked to
* remove it, or if the object is being removed.
*/
if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
kill_spill = B_TRUE;
dn->dn_rm_spillblk[txgoff] = 0;
}
if (dn->dn_next_indblkshift[txgoff] != 0) {
ASSERT(dnp->dn_nlevels == 1);
dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
dn->dn_next_indblkshift[txgoff] = 0;
}
/*
* Just take the live (open-context) values for checksum and compress.
* Strictly speaking it's a future leak, but nothing bad happens if we
* start using the new checksum or compress algorithm a little early.
*/
dnp->dn_checksum = dn->dn_checksum;
dnp->dn_compress = dn->dn_compress;
mutex_exit(&dn->dn_mtx);
if (kill_spill) {
free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
mutex_enter(&dn->dn_mtx);
dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/* process all the "freed" ranges in the file */
if (dn->dn_free_ranges[txgoff] != NULL) {
dnode_sync_free_range_arg_t dsfra;
dsfra.dsfra_dnode = dn;
dsfra.dsfra_tx = tx;
dsfra.dsfra_free_indirects = freeing_dnode;
mutex_enter(&dn->dn_mtx);
if (freeing_dnode) {
- ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
- 0, dn->dn_maxblkid + 1));
+ ASSERT(zfs_range_tree_contains(
+ dn->dn_free_ranges[txgoff], 0,
+ dn->dn_maxblkid + 1));
}
/*
* Because dnode_sync_free_range() must drop dn_mtx during its
- * processing, using it as a callback to range_tree_vacate() is
- * not safe. No other operations (besides destroy) are allowed
- * once range_tree_vacate() has begun, and dropping dn_mtx
- * would leave a window open for another thread to observe that
- * invalid (and unsafe) state.
+ * processing, using it as a callback to zfs_range_tree_vacate()
+ * is not safe. No other operations (besides destroy) are
+ * allowed once zfs_range_tree_vacate() has begun, and dropping
+ * dn_mtx would leave a window open for another thread to
+ * observe that invalid (and unsafe) state.
*/
- range_tree_walk(dn->dn_free_ranges[txgoff],
+ zfs_range_tree_walk(dn->dn_free_ranges[txgoff],
dnode_sync_free_range, &dsfra);
- range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
- range_tree_destroy(dn->dn_free_ranges[txgoff]);
+ zfs_range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
+ zfs_range_tree_destroy(dn->dn_free_ranges[txgoff]);
dn->dn_free_ranges[txgoff] = NULL;
mutex_exit(&dn->dn_mtx);
}
if (freeing_dnode) {
dn->dn_objset->os_freed_dnodes++;
dnode_sync_free(dn, tx);
return;
}
if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_LARGE_DNODE] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
}
if (dn->dn_next_nlevels[txgoff]) {
dnode_increase_indirection(dn, tx);
dn->dn_next_nlevels[txgoff] = 0;
}
/*
* This must be done after dnode_sync_free_range()
* and dnode_increase_indirection(). See dnode_new_blkid()
* for an explanation of the high bit being set.
*/
if (dn->dn_next_maxblkid[txgoff]) {
mutex_enter(&dn->dn_mtx);
dnp->dn_maxblkid =
dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
dn->dn_next_maxblkid[txgoff] = 0;
mutex_exit(&dn->dn_mtx);
}
if (dn->dn_next_nblkptr[txgoff]) {
/* this should only happen on a realloc */
ASSERT(dn->dn_allocated_txg == tx->tx_txg);
if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
/* zero the new blkptrs we are gaining */
memset(dnp->dn_blkptr + dnp->dn_nblkptr, 0,
sizeof (blkptr_t) *
(dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
} else {
int i;
ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
/* the blkptrs we are losing better be unallocated */
for (i = 0; i < dnp->dn_nblkptr; i++) {
if (i >= dn->dn_next_nblkptr[txgoff])
ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
}
#endif
}
mutex_enter(&dn->dn_mtx);
dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
dn->dn_next_nblkptr[txgoff] = 0;
mutex_exit(&dn->dn_mtx);
}
dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
ASSERT3P(list_head(list), ==, NULL);
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
}
ASSERT3U(dnp->dn_bonuslen, <=, DN_MAX_BONUS_LEN(dnp));
/*
* Although we have dropped our reference to the dnode, it
* can't be evicted until it's written, and we haven't yet
* initiated the IO for the dnode's dbuf. Additionally, the caller
* has already added a reference to the dnode because it's on the
* os_synced_dnodes list.
*/
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_pool.c b/sys/contrib/openzfs/module/zfs/dsl_pool.c
index 39f97d7547c6..b98ff69191de 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_pool.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_pool.c
@@ -1,1495 +1,1495 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_zfs.h>
#include <sys/mmp.h>
/*
* ZFS Write Throttle
* ------------------
*
* ZFS must limit the rate of incoming writes to the rate at which it is able
* to sync data modifications to the backend storage. Throttling by too much
* creates an artificial limit; throttling by too little can only be sustained
* for short periods and would lead to highly lumpy performance. On a per-pool
* basis, ZFS tracks the amount of modified (dirty) data. As operations change
* data, the amount of dirty data increases; as ZFS syncs out data, the amount
* of dirty data decreases. When the amount of dirty data exceeds a
* predetermined threshold further modifications are blocked until the amount
* of dirty data decreases (as data is synced out).
*
* The limit on dirty data is tunable, and should be adjusted according to
* both the IO capacity and available memory of the system. The larger the
* window, the more ZFS is able to aggregate and amortize metadata (and data)
* changes. However, memory is a limited resource, and allowing for more dirty
* data comes at the cost of keeping other useful data in memory (for example
* ZFS data cached by the ARC).
*
* Implementation
*
* As buffers are modified dsl_pool_willuse_space() increments both the per-
* txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
* dirty space used; dsl_pool_dirty_space() decrements those values as data
* is synced out from dsl_pool_sync(). While only the poolwide value is
* relevant, the per-txg value is useful for debugging. The tunable
* zfs_dirty_data_max determines the dirty space limit. Once that value is
* exceeded, new writes are halted until space frees up.
*
* The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
* ensure that there is a txg syncing (see the comment in txg.c for a full
* description of transaction group stages).
*
* The IO scheduler uses both the dirty space limit and current amount of
* dirty data as inputs. Those values affect the number of concurrent IOs ZFS
* issues. See the comment in vdev_queue.c for details of the IO scheduler.
*
* The delay is also calculated based on the amount of dirty data. See the
* comment above dmu_tx_delay() for details.
*/
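/*
 * Illustrative sketch (not part of the change above): how the dirty
 * data tunables described in this comment relate to each other.  Once
 * dirty data crosses zfs_dirty_data_sync_percent of zfs_dirty_data_max,
 * a txg sync is requested; once it crosses zfs_delay_min_dirty_percent,
 * dmu_tx_delay() starts delaying new transactions.  The dirty_data_max
 * and dirty values are arbitrary; the percentages mirror the defaults
 * set further below.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t dirty_data_max = 4ULL << 30;	/* e.g. a 4 GiB cap */
	uint64_t sync_percent = 20;	/* cf. zfs_dirty_data_sync_percent */
	uint64_t delay_percent = 60;	/* cf. zfs_delay_min_dirty_percent */
	uint64_t dirty = 3ULL << 30;	/* dirty data currently outstanding */

	uint64_t sync_thresh = dirty_data_max * sync_percent / 100;
	uint64_t delay_thresh = dirty_data_max * delay_percent / 100;

	printf("sync at %llu MiB, delay at %llu MiB, dirty now %llu MiB\n",
	    (unsigned long long)(sync_thresh >> 20),
	    (unsigned long long)(delay_thresh >> 20),
	    (unsigned long long)(dirty >> 20));

	if (dirty > delay_thresh)
		printf("dmu_tx_delay() would be delaying writers\n");
	else if (dirty > sync_thresh)
		printf("a txg sync would be kicked off\n");
	return (0);
}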
/*
* zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
* capped at zfs_dirty_data_max_max. It can also be overridden with a module
* parameter.
*/
uint64_t zfs_dirty_data_max = 0;
uint64_t zfs_dirty_data_max_max = 0;
uint_t zfs_dirty_data_max_percent = 10;
uint_t zfs_dirty_data_max_max_percent = 25;
/*
* The upper limit of TX_WRITE log data. Write operations are throttled
* when approaching the limit until log data is cleared out after txg sync.
* It only counts TX_WRITE log with WR_COPIED or WR_NEED_COPY.
*/
uint64_t zfs_wrlog_data_max = 0;
/*
* If there's at least this much dirty data (as a percentage of
* zfs_dirty_data_max), push out a txg. This should be less than
* zfs_vdev_async_write_active_min_dirty_percent.
*/
static uint_t zfs_dirty_data_sync_percent = 20;
/*
* Once there is this amount of dirty data, the dmu_tx_delay() will kick in
* and delay each transaction.
* This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
*/
uint_t zfs_delay_min_dirty_percent = 60;
/*
* This controls how quickly the delay approaches infinity.
* Larger values cause it to delay more for a given amount of dirty data.
* Therefore larger values will cause there to be less dirty data for a
* given throughput.
*
* For the smoothest delay, this value should be about 1 billion divided
* by the maximum number of operations per second. This will smoothly
* handle between 10x and 1/10th this number.
*
* Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
* multiply in dmu_tx_delay().
*/
uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
/*
* These tunables determine the behavior of how zil_itxg_clean() is
* called via zil_clean() in the context of spa_sync(). When an itxg
* list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
* If the dispatch fails, the call to zil_itxg_clean() will occur
* synchronously in the context of spa_sync(), which can negatively
* impact the performance of spa_sync() (e.g. in the case of the itxg
* list having a large number of itxs that needs to be cleaned).
*
* Thus, these tunables can be used to manipulate the behavior of the
* taskq used by zil_clean(); they determine the number of taskq entries
* that are pre-populated when the taskq is first created (via the
* "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
* taskq entries that are cached after an on-demand allocation (via the
* "zfs_zil_clean_taskq_maxalloc").
*
* The idea being, we want to try reasonably hard to ensure there will
* already be a taskq entry pre-allocated by the time that it is needed
* by zil_clean(). This way, we can avoid the possibility of an
* on-demand allocation of a new taskq entry from failing, which would
* result in zil_itxg_clean() being called synchronously from zil_clean()
* (which can adversely affect performance of spa_sync()).
*
* Additionally, the number of threads used by the taskq can be
* configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
*/
static int zfs_zil_clean_taskq_nthr_pct = 100;
static int zfs_zil_clean_taskq_minalloc = 1024;
static int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
uint64_t obj;
int err;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
name, sizeof (obj), 1, &obj);
if (err)
return (err);
return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
dsl_pool_t *dp;
blkptr_t *bp = spa_get_rootblkptr(spa);
dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
dp->dp_spa = spa;
dp->dp_meta_rootbp = *bp;
rrw_init(&dp->dp_config_rwlock, B_TRUE);
txg_init(dp, txg);
mmp_init(spa);
txg_list_create(&dp->dp_dirty_datasets, spa,
offsetof(dsl_dataset_t, ds_dirty_link));
txg_list_create(&dp->dp_dirty_zilogs, spa,
offsetof(zilog_t, zl_dirty_link));
txg_list_create(&dp->dp_dirty_dirs, spa,
offsetof(dsl_dir_t, dd_dirty_link));
txg_list_create(&dp->dp_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
txg_list_create(&dp->dp_early_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
dp->dp_sync_taskq = spa_sync_tq_create(spa, "dp_sync_taskq");
dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
zfs_zil_clean_taskq_nthr_pct, minclsyspri,
zfs_zil_clean_taskq_minalloc,
zfs_zil_clean_taskq_maxalloc,
TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
aggsum_init(&dp->dp_wrlog_total, 0);
for (int i = 0; i < TXG_SIZE; i++) {
aggsum_init(&dp->dp_wrlog_pertxg[i], 0);
}
dp->dp_zrele_taskq = taskq_create("z_zrele", 100, defclsyspri,
boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
TASKQ_THREADS_CPU_PCT);
dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
100, defclsyspri, boot_ncpus, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
return (dp);
}
int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
/*
* Initialize the caller's dsl_pool_t structure before we actually open
* the meta objset. This is done because a self-healing write zio may
* be issued as part of dmu_objset_open_impl() and the spa needs its
* dsl_pool_t initialized in order to handle the write.
*/
*dpp = dp;
err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
&dp->dp_meta_objset);
if (err != 0) {
dsl_pool_close(dp);
*dpp = NULL;
}
return (err);
}
int
dsl_pool_open(dsl_pool_t *dp)
{
int err;
dsl_dir_t *dd;
dsl_dataset_t *ds;
uint64_t obj;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
&dp->dp_root_dir_obj);
if (err)
goto out;
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir);
if (err)
goto out;
err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
if (err)
goto out;
if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
if (err)
goto out;
err = dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
if (err == 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
&dp->dp_origin_snap);
dsl_dataset_rele(ds, FTAG);
}
dsl_dir_rele(dd, dp);
if (err)
goto out;
}
if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
&dp->dp_free_dir);
if (err)
goto out;
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
if (err)
goto out;
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
if (err == 0) {
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
dp->dp_meta_objset, obj));
} else if (err == ENOENT) {
/*
* We might not have created the remap bpobj yet.
*/
} else {
goto out;
}
}
/*
* Note: errors ignored, because these special dirs, used for
* space accounting, are only created on demand.
*/
(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
&dp->dp_leak_dir);
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
&dp->dp_bptree_obj);
if (err != 0)
goto out;
}
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj);
if (err != 0)
goto out;
}
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
&dp->dp_tmp_userrefs_obj);
if (err == ENOENT)
err = 0;
if (err)
goto out;
err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
out:
rrw_exit(&dp->dp_config_rwlock, FTAG);
return (err);
}
void
dsl_pool_close(dsl_pool_t *dp)
{
/*
* Drop our references from dsl_pool_open().
*
* Since we held the origin_snap from "syncing" context (which
* includes pool-opening context), it actually only got a "ref"
* and not a hold, so just drop that here.
*/
if (dp->dp_origin_snap != NULL)
dsl_dataset_rele(dp->dp_origin_snap, dp);
if (dp->dp_mos_dir != NULL)
dsl_dir_rele(dp->dp_mos_dir, dp);
if (dp->dp_free_dir != NULL)
dsl_dir_rele(dp->dp_free_dir, dp);
if (dp->dp_leak_dir != NULL)
dsl_dir_rele(dp->dp_leak_dir, dp);
if (dp->dp_root_dir != NULL)
dsl_dir_rele(dp->dp_root_dir, dp);
bpobj_close(&dp->dp_free_bpobj);
bpobj_close(&dp->dp_obsolete_bpobj);
/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
if (dp->dp_meta_objset != NULL)
dmu_objset_evict(dp->dp_meta_objset);
txg_list_destroy(&dp->dp_dirty_datasets);
txg_list_destroy(&dp->dp_dirty_zilogs);
txg_list_destroy(&dp->dp_sync_tasks);
txg_list_destroy(&dp->dp_early_sync_tasks);
txg_list_destroy(&dp->dp_dirty_dirs);
taskq_destroy(dp->dp_zil_clean_taskq);
spa_sync_tq_destroy(dp->dp_spa);
if (dp->dp_spa->spa_state == POOL_STATE_EXPORTED ||
dp->dp_spa->spa_state == POOL_STATE_DESTROYED) {
/*
* On export/destroy perform the ARC flush asynchronously.
*/
arc_flush_async(dp->dp_spa);
} else {
/*
* We can't set retry to TRUE since we're explicitly specifying
* a spa to flush. This is good enough; any missed buffers for
* this spa won't cause trouble, and they'll eventually fall
* out of the ARC just like any other unused buffer.
*/
arc_flush(dp->dp_spa, FALSE);
}
mmp_fini(dp->dp_spa);
txg_fini(dp);
dsl_scan_fini(dp);
dmu_buf_user_evict_wait();
rrw_destroy(&dp->dp_config_rwlock);
mutex_destroy(&dp->dp_lock);
cv_destroy(&dp->dp_spaceavail_cv);
ASSERT0(aggsum_value(&dp->dp_wrlog_total));
aggsum_fini(&dp->dp_wrlog_total);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(aggsum_value(&dp->dp_wrlog_pertxg[i]));
aggsum_fini(&dp->dp_wrlog_pertxg[i]);
}
taskq_destroy(dp->dp_unlinked_drain_taskq);
taskq_destroy(dp->dp_zrele_taskq);
if (dp->dp_blkstats != NULL)
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
kmem_free(dp, sizeof (dsl_pool_t));
}
void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t obj;
/*
* Currently, we only create the obsolete_bpobj where there are
* indirect vdevs with referenced mappings.
*/
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
/* create and open the obsolete_bpobj */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, tx));
bpobj_free(dp->dp_meta_objset,
dp->dp_obsolete_bpobj.bpo_object, tx);
bpobj_close(&dp->dp_obsolete_bpobj);
}
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
dsl_crypto_params_t *dcp, uint64_t txg)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
objset_t *os;
#else
objset_t *os __attribute__((unused));
#endif
dsl_dataset_t *ds;
uint64_t obj;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
/* create and open the MOS (meta-objset) */
dp->dp_meta_objset = dmu_objset_create_impl(spa,
NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
spa->spa_meta_objset = dp->dp_meta_objset;
/* create the pool directory */
err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
ASSERT0(err);
/* Initialize scan structures */
VERIFY0(dsl_scan_init(dp, txg));
/* create and open the root dir */
dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir));
/* create and open the meta-objset dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
MOS_DIR_NAME, &dp->dp_mos_dir));
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
/* create and open the free dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/* create and open the free_bplist */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
dsl_pool_create_origin(dp, tx);
/*
* Some features may be needed when creating the root dataset, so we
* create the feature objects here.
*/
if (spa_version(spa) >= SPA_VERSION_FEATURES)
spa_feature_create_zap_objects(spa, tx);
if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT)
spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);
/* create the root dataset */
obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);
/* create the root objset */
VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
os = dmu_objset_create_impl(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
zfs_create_fs(os, kcred, zplprops, tx);
#endif
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dmu_tx_commit(tx);
rrw_exit(&dp->dp_config_rwlock, FTAG);
return (dp);
}
/*
* Account for the meta-objset space in its placeholder dsl_dir.
*/
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp)
{
ASSERT3U(comp, ==, uncomp); /* it's all metadata */
mutex_enter(&dp->dp_lock);
dp->dp_mos_used_delta += used;
dp->dp_mos_compressed_delta += comp;
dp->dp_mos_uncompressed_delta += uncomp;
mutex_exit(&dp->dp_lock);
}
static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dmu_objset_sync(dp->dp_meta_objset, zio, tx);
VERIFY0(zio_wait(zio));
dmu_objset_sync_done(dp->dp_meta_objset, tx);
taskq_wait(dp->dp_sync_taskq);
multilist_destroy(&dp->dp_meta_objset->os_synced_dnodes);
dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}
static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
ASSERT(MUTEX_HELD(&dp->dp_lock));
if (delta < 0)
ASSERT3U(-delta, <=, dp->dp_dirty_total);
dp->dp_dirty_total += delta;
/*
* Note: we signal even when increasing dp_dirty_total.
* This ensures forward progress -- each thread wakes the next waiter.
*/
if (dp->dp_dirty_total < zfs_dirty_data_max)
cv_signal(&dp->dp_spaceavail_cv);
}
void
dsl_pool_wrlog_count(dsl_pool_t *dp, int64_t size, uint64_t txg)
{
ASSERT3S(size, >=, 0);
aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], size);
aggsum_add(&dp->dp_wrlog_total, size);
/* Choose a value slightly bigger than min dirty sync bytes */
uint64_t sync_min =
zfs_wrlog_data_max * (zfs_dirty_data_sync_percent + 10) / 200;
if (aggsum_compare(&dp->dp_wrlog_pertxg[txg & TXG_MASK], sync_min) > 0)
txg_kick(dp, txg);
}
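For a concrete feel of the kick threshold computed above, here is a minimal stand-alone sketch; the tunable values are hypothetical and not defaults taken from this change.

/* Illustrative only: mirrors the sync_min expression in dsl_pool_wrlog_count(). */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical tunables, not ZFS defaults. */
	uint64_t wrlog_data_max = 4ULL << 30;		/* 4 GiB */
	uint64_t dirty_data_sync_percent = 20;

	uint64_t sync_min =
	    wrlog_data_max * (dirty_data_sync_percent + 10) / 200;

	/* A txg is kicked once its ZIL write log exceeds this many bytes. */
	printf("sync_min = %llu bytes\n", (unsigned long long)sync_min);
	return (0);
}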
boolean_t
dsl_pool_need_wrlog_delay(dsl_pool_t *dp)
{
uint64_t delay_min_bytes =
zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
return (aggsum_compare(&dp->dp_wrlog_total, delay_min_bytes) > 0);
}
static void
dsl_pool_wrlog_clear(dsl_pool_t *dp, uint64_t txg)
{
int64_t delta;
delta = -(int64_t)aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], delta);
aggsum_add(&dp->dp_wrlog_total, delta);
/* Compact per-CPU sums after the big change. */
(void) aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
(void) aggsum_value(&dp->dp_wrlog_total);
}
#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
spa_t *spa = dp->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
txg_list_t *tl = &vd->vdev_ms_list;
metaslab_t *ms;
for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
- VERIFY(range_tree_is_empty(ms->ms_freeing));
- VERIFY(range_tree_is_empty(ms->ms_checkpointing));
+ VERIFY(zfs_range_tree_is_empty(ms->ms_freeing));
+ VERIFY(zfs_range_tree_is_empty(ms->ms_checkpointing));
}
}
return (B_TRUE);
}
#else
#define dsl_early_sync_task_verify(dp, txg) \
((void) sizeof (dp), (void) sizeof (txg), B_TRUE)
#endif
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
zio_t *rio; /* root zio for all dirty dataset syncs */
dmu_tx_t *tx;
dsl_dir_t *dd;
dsl_dataset_t *ds;
objset_t *mos = dp->dp_meta_objset;
list_t synced_datasets;
list_create(&synced_datasets, sizeof (dsl_dataset_t),
offsetof(dsl_dataset_t, ds_synced_link));
tx = dmu_tx_create_assigned(dp, txg);
/*
* Run all early sync tasks before writing out any dirty blocks.
* For more info on early sync tasks see block comment in
* dsl_early_sync_task().
*/
if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
dsl_sync_task_t *dst;
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst =
txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
ASSERT(dsl_early_sync_task_verify(dp, txg));
dsl_sync_task_sync(dst, tx);
}
ASSERT(dsl_early_sync_task_verify(dp, txg));
}
/*
* Write out all dirty blocks of dirty datasets. Note, this could
* create a very large (+10k) zio tree.
*/
rio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
/*
* We must not sync any non-MOS datasets twice, because
* we may have taken a snapshot of them. However, we
* may sync newly-created datasets on pass 2.
*/
ASSERT(!list_link_active(&ds->ds_synced_link));
list_insert_tail(&synced_datasets, ds);
dsl_dataset_sync(ds, rio, tx);
}
VERIFY0(zio_wait(rio));
/*
* Update the long range free counter after
* we're done syncing user data
*/
mutex_enter(&dp->dp_lock);
ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
mutex_exit(&dp->dp_lock);
/*
* After the data blocks have been written (ensured by the zio_wait()
* above), update the user/group/project space accounting. This happens
* in tasks dispatched to dp_sync_taskq, so wait for them before
* continuing.
*/
for (ds = list_head(&synced_datasets); ds != NULL;
ds = list_next(&synced_datasets, ds)) {
dmu_objset_sync_done(ds->ds_objset, tx);
}
taskq_wait(dp->dp_sync_taskq);
/*
* Sync the datasets again to push out the changes due to
* userspace updates. This must be done before we process the
* sync tasks, so that any snapshots will have the correct
* user accounting information (and we won't get confused
* about which blocks are part of the snapshot).
*/
rio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
objset_t *os = ds->ds_objset;
ASSERT(list_link_active(&ds->ds_synced_link));
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, rio, tx);
/*
* Release any key mappings created by calls to
* dsl_dataset_dirty() from the userquota accounting
* code paths.
*/
if (os->os_encrypted && !os->os_raw_receive &&
!os->os_next_write_raw[txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
}
}
VERIFY0(zio_wait(rio));
/*
* Now that the datasets have been completely synced, we can
* clean up our in-memory structures accumulated while syncing:
*
* - move dead blocks from the pending deadlist and livelists
* to the on-disk versions
* - release hold from dsl_dataset_dirty()
* - release key mapping hold from dsl_dataset_dirty()
*/
while ((ds = list_remove_head(&synced_datasets)) != NULL) {
objset_t *os = ds->ds_objset;
if (os->os_encrypted && !os->os_raw_receive &&
!os->os_next_write_raw[txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
}
dsl_dataset_sync_done(ds, tx);
dmu_buf_rele(ds->ds_dbuf, ds);
}
while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
dsl_dir_sync(dd, tx);
}
/*
* The MOS's space is accounted for in the pool/$MOS
* (dp_mos_dir). We can't modify the mos while we're syncing
* it, so we remember the deltas and apply them here.
*/
if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
dp->dp_mos_uncompressed_delta != 0) {
dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
dp->dp_mos_used_delta,
dp->dp_mos_compressed_delta,
dp->dp_mos_uncompressed_delta, tx);
dp->dp_mos_used_delta = 0;
dp->dp_mos_compressed_delta = 0;
dp->dp_mos_uncompressed_delta = 0;
}
if (dmu_objset_is_dirty(mos, txg)) {
dsl_pool_sync_mos(dp, tx);
}
/*
* We have written all of the accounted dirty data, so our
* dp_space_towrite should now be zero. However, some seldom-used
* code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
* the accounting of any dirtied space now.
*
* Note that, besides any dirty data from datasets, the amount of
* dirty data in the MOS is also accounted by the pool. Therefore,
* we want to do this cleanup after dsl_pool_sync_mos() so we don't
* attempt to update the accounting for the same dirty data twice.
* (i.e. at this point we only update the accounting for the space
* that we know that we "leaked").
*/
dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
/*
* If we modify a dataset in the same txg that we want to destroy it,
* its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
* dsl_dir_destroy_check() will fail if there are unexpected holds.
* Therefore, we want to sync the MOS (thus syncing the dd_dbuf
* and clearing the hold on it) before we process the sync_tasks.
* The MOS data dirtied by the sync_tasks will be synced on the next
* pass.
*/
if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
dsl_sync_task_t *dst;
/*
* No more sync tasks should have been added while we
* were syncing.
*/
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
dsl_sync_task_sync(dst, tx);
}
dmu_tx_commit(tx);
DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}
void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
zilog_t *zilog;
while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
/*
* We don't remove the zilog from the dp_dirty_zilogs
* list until after we've cleaned it. This ensures that
* callers of zilog_is_dirty() receive an accurate
* answer when they are racing with the spa sync thread.
*/
zil_clean(zilog, txg);
(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
dmu_buf_rele(ds->ds_dbuf, zilog);
}
dsl_pool_wrlog_clear(dp, txg);
ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}
/*
* TRUE if the current thread is the tx_sync_thread or if we
* are being called from SPA context during pool initialization.
*/
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
return (curthread == dp->dp_tx.tx_sync_thread ||
spa_is_initializing(dp->dp_spa) ||
taskq_member(dp->dp_sync_taskq, curthread));
}
/*
* This function returns the amount of allocatable space in the pool
* minus whatever space is currently reserved by ZFS for specific
* purposes. Specifically:
*
* 1] Any reserved SLOP space
* 2] Any space used by the checkpoint
* 3] Any space used for deferred frees
*
* The latter 2 are especially important because they are needed to
* rectify the SPA's and DMU's different understanding of how much space
* is used. Now the DMU is aware of that extra space tracked by the SPA
* without having to maintain a separate special dir (e.g. similar to
* $MOS, $FREEING, and $LEAKED).
*
* Note: By deferred frees here, we mean the frees that were deferred
* in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
* segments placed in ms_defer trees during metaslab_sync_done().
*/
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
spa_t *spa = dp->dp_spa;
uint64_t space, resv, adjustedsize;
uint64_t spa_deferred_frees =
spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;
space = spa_get_dspace(spa)
- spa_get_checkpoint_space(spa) - spa_deferred_frees;
resv = spa_get_slop_space(spa);
switch (slop_policy) {
case ZFS_SPACE_CHECK_NORMAL:
break;
case ZFS_SPACE_CHECK_RESERVED:
resv >>= 1;
break;
case ZFS_SPACE_CHECK_EXTRA_RESERVED:
resv >>= 2;
break;
case ZFS_SPACE_CHECK_NONE:
resv = 0;
break;
default:
panic("invalid slop policy value: %d", slop_policy);
break;
}
adjustedsize = (space >= resv) ? (space - resv) : 0;
return (adjustedsize);
}
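A minimal sketch of how the slop policies above scale the reservation, using made-up space and slop figures:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical pool: 100 GiB of adjusted space, ~3.2 GiB of slop. */
	uint64_t space = 100ULL << 30;
	uint64_t resv = 3276ULL << 20;

	/* ZFS_SPACE_CHECK_NORMAL keeps the full reservation. */
	printf("normal:   %llu\n", (unsigned long long)(space - resv));
	/* ZFS_SPACE_CHECK_RESERVED halves it (resv >>= 1). */
	printf("reserved: %llu\n", (unsigned long long)(space - (resv >> 1)));
	/* ZFS_SPACE_CHECK_EXTRA_RESERVED quarters it (resv >>= 2). */
	printf("extra:    %llu\n", (unsigned long long)(space - (resv >> 2)));
	/* ZFS_SPACE_CHECK_NONE drops it entirely. */
	printf("none:     %llu\n", (unsigned long long)space);
	return (0);
}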
uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
uint64_t deferred =
metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
return (quota);
}
uint64_t
dsl_pool_deferred_space(dsl_pool_t *dp)
{
return (metaslab_class_get_deferred(spa_normal_class(dp->dp_spa)));
}
boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
/*
* We are not taking the dp_lock here, and in a few other places, since torn
* reads are unlikely: on 64-bit systems due to register size and on
* 32-bit due to memory constraints. Pool-wide locks in hot path may
* be too expensive, while we do not need a precise result here.
*/
return (dp->dp_dirty_total > delay_min_bytes);
}
static boolean_t
dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
{
uint64_t dirty_min_bytes =
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
return (dirty > dirty_min_bytes);
}
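As a hedged illustration of the two thresholds used by dsl_pool_need_dirty_delay() and dsl_pool_need_dirty_sync() above (the tunable values below are hypothetical, not defaults from this change):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical tunables. */
	uint64_t dirty_data_max = 4ULL << 30;		/* 4 GiB */
	uint64_t delay_min_dirty_percent = 60;
	uint64_t dirty_data_sync_percent = 20;

	/* Write throttling begins above this much total dirty data... */
	uint64_t delay_min_bytes =
	    dirty_data_max * delay_min_dirty_percent / 100;
	/* ...and a txg sync is requested above this much dirty data per txg. */
	uint64_t dirty_min_bytes =
	    dirty_data_max * dirty_data_sync_percent / 100;

	printf("delay above: %llu bytes\n", (unsigned long long)delay_min_bytes);
	printf("sync above:  %llu bytes\n", (unsigned long long)dirty_min_bytes);
	return (0);
}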
void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
if (space > 0) {
mutex_enter(&dp->dp_lock);
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
dsl_pool_dirty_delta(dp, space);
boolean_t needsync = !dmu_tx_is_syncing(tx) &&
dsl_pool_need_dirty_sync(dp, tx->tx_txg);
mutex_exit(&dp->dp_lock);
if (needsync)
txg_kick(dp, tx->tx_txg);
}
}
void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
ASSERT3S(space, >=, 0);
if (space == 0)
return;
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
/* XXX writing something we didn't dirty? */
space = dp->dp_dirty_pertxg[txg & TXG_MASK];
}
ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
ASSERT3U(dp->dp_dirty_total, >=, space);
dsl_pool_dirty_delta(dp, -space);
mutex_exit(&dp->dp_lock);
}
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
dmu_tx_t *tx = arg;
dsl_dataset_t *ds, *prev = NULL;
int err;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
break;
dsl_dataset_rele(ds, FTAG);
ds = prev;
prev = NULL;
}
if (prev == NULL) {
prev = dp->dp_origin_snap;
/*
* The $ORIGIN can't have any data, or the accounting
* will be wrong.
*/
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT0(BP_GET_LOGICAL_BIRTH(&dsl_dataset_phys(prev)->ds_bp));
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/* The origin doesn't get attached to itself */
if (ds->ds_object == prev->ds_object) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
dsl_dataset_phys(ds)->ds_prev_snap_txg =
dsl_dataset_phys(prev)->ds_creation_txg;
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_num_children++;
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
ASSERT(ds->ds_prev == NULL);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev));
}
}
ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);
if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_next_clones_obj =
zap_create(dp->dp_meta_objset,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
dsl_dataset_rele(ds, FTAG);
if (prev != dp->dp_origin_snap)
dsl_dataset_rele(prev, FTAG);
return (0);
}
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap != NULL);
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
dmu_tx_t *tx = arg;
objset_t *mos = dp->dp_meta_objset;
if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
dsl_dataset_t *origin;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(origin->ds_dir)->dd_clones,
ds->ds_object, tx));
dsl_dataset_rele(origin, FTAG);
}
return (0);
}
void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t obj;
ASSERT(dmu_tx_is_syncing(tx));
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/*
* We can't use bpobj_alloc(), because spa_version() still
* returns the old version, and we need a new-version bpobj with
* subobj support. So call dmu_object_alloc() directly.
*/
obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t dsobj;
dsl_dataset_t *ds;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap == NULL);
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
/* create the origin dir, ds, & snap-ds */
dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
NULL, 0, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
dp, &dp->dp_origin_snap));
dsl_dataset_rele(ds, FTAG);
}
taskq_t *
dsl_pool_zrele_taskq(dsl_pool_t *dp)
{
return (dp->dp_zrele_taskq);
}
taskq_t *
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
{
return (dp->dp_unlinked_drain_taskq);
}
/*
* Walk through the pool-wide zap object of temporary snapshot user holds
* and release them.
*/
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
zap_attribute_t *za;
zap_cursor_t zc;
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
nvlist_t *holds;
if (zapobj == 0)
return;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
holds = fnvlist_alloc();
za = zap_attribute_alloc();
for (zap_cursor_init(&zc, mos, zapobj);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
char *htag;
nvlist_t *tags;
htag = strchr(za->za_name, '-');
*htag = '\0';
++htag;
if (nvlist_lookup_nvlist(holds, za->za_name, &tags) != 0) {
tags = fnvlist_alloc();
fnvlist_add_boolean(tags, htag);
fnvlist_add_nvlist(holds, za->za_name, tags);
fnvlist_free(tags);
} else {
fnvlist_add_boolean(tags, htag);
}
}
dsl_dataset_user_release_tmp(dp, holds);
fnvlist_free(holds);
zap_cursor_fini(&zc);
zap_attribute_free(za);
}
/*
* Create the pool-wide zap object for storing temporary snapshot holds.
*/
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
ASSERT(dp->dp_tmp_userrefs_obj == 0);
ASSERT(dmu_tx_is_syncing(tx));
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}
static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
char *name;
int error;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
ASSERT(dmu_tx_is_syncing(tx));
/*
* If the pool was created prior to SPA_VERSION_USERREFS, the
* zap object for temporary holds might not exist yet.
*/
if (zapobj == 0) {
if (holding) {
dsl_pool_user_hold_create_obj(dp, tx);
zapobj = dp->dp_tmp_userrefs_obj;
} else {
return (SET_ERROR(ENOENT));
}
}
name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
if (holding)
error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
else
error = zap_remove(mos, zapobj, name, tx);
kmem_strfree(name);
return (error);
}
/*
* Add a temporary hold for the given dataset object and tag.
*/
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
uint64_t now, dmu_tx_t *tx)
{
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}
/*
* Release a temporary hold for the given dataset object and tag.
*/
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
dmu_tx_t *tx)
{
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
tx, B_FALSE));
}
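The key format used by the two helpers above is "<dsobj-hex>-<tag>", which dsl_pool_clean_tmp_userrefs() later splits at the first '-'. A small stand-alone sketch with hypothetical values:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char name[64];
	unsigned long long dsobj = 0x1234;	/* hypothetical dataset object */

	/* Same "%llx-%s" format as dsl_pool_user_hold_rele_impl(). */
	(void) snprintf(name, sizeof (name), "%llx-%s", dsobj, "mytag");

	/* dsl_pool_clean_tmp_userrefs() splits on the first '-'. */
	char *htag = strchr(name, '-');
	*htag++ = '\0';

	printf("dsobj=%s tag=%s\n", name, htag);
	return (0);
}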
/*
* DSL Pool Configuration Lock
*
* The dp_config_rwlock protects against changes to DSL state (e.g. dataset
* creation / destruction / rename / property setting). It must be held for
* read to hold a dataset or dsl_dir. I.e. you must call
* dsl_pool_config_enter() or dsl_pool_hold() before calling
* dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
* must be held continuously until all datasets and dsl_dirs are released.
*
* The only exception to this rule is that if a "long hold" is placed on
* a dataset, then the dp_config_rwlock may be dropped while the dataset
* is still held. The long hold will prevent the dataset from being
* destroyed -- the destroy will fail with EBUSY. A long hold can be
* obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
* (by calling dsl_{dataset,objset}_{try}own{_obj}).
*
* Legitimate long-holders (including owners) should be long-running, cancelable
* tasks that should cause "zfs destroy" to fail. This includes DMU
* consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
* "zfs send", and "zfs diff". There are several other long-holders whose
* uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
*
* The usual formula for long-holding would be:
* dsl_pool_hold()
* dsl_dataset_hold()
* ... perform checks ...
* dsl_dataset_long_hold()
* dsl_pool_rele()
* ... perform long-running task ...
* dsl_dataset_long_rele()
* dsl_dataset_rele()
*
* Note that when the long hold is released, the dataset is still held but
* the pool is not held. The dataset may change arbitrarily during this time
* (e.g. it could be destroyed). Therefore you shouldn't do anything to the
* dataset except release it.
*
* Operations generally fall somewhere into the following taxonomy:
*
*                             Read-Only             Modifying
*
*    Dataset Layer / MOS      zfs get               zfs destroy
*
*    Individual Dataset       read()                write()
*
*
* Dataset Layer Operations
*
* Modifying operations should generally use dsl_sync_task(). The synctask
* infrastructure enforces proper locking strategy with respect to the
* dp_config_rwlock. See the comment above dsl_sync_task() for details.
*
* Read-only operations will manually hold the pool, then the dataset, obtain
* information from the dataset, then release the pool and dataset.
* dmu_objset_{hold,rele}() are convenience routines that also do the pool
* hold/rele.
*
*
* Operations On Individual Datasets
*
* Objects _within_ an objset should only be modified by the current 'owner'
* of the objset to prevent incorrect concurrent modification. Thus, use
* {dmu_objset,dsl_dataset}_own to mark some entity as the current owner,
* and fail with EBUSY if there is already an owner. The owner can then
* implement its own locking strategy, independent of the dataset layer's
* locking infrastructure.
* (E.g., the ZPL has its own set of locks to control concurrency. A regular
* vnop will not reach into the dataset layer).
*
* Ideally, objects would also only be read by the objset's owner, so that we
* don't observe state mid-modification.
* (E.g. the ZPL is creating a new object and linking it into a directory; if
* you don't coordinate with the ZPL to hold ZPL-level locks, you could see an
* intermediate state. The ioctl level violates this but in pretty benign
* ways, e.g. reading the zpl props object.)
*/
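To make the long-hold formula above concrete, here is a hedged, illustrative sequence (error handling trimmed; the dsl_dataset_hold()/dsl_dataset_long_hold() signatures are assumed from their use elsewhere in the tree, and FTAG stands in for a real tag):

/* Illustrative sketch only; not part of this change. */
static int
example_long_running_task(const char *dsname)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	/* ... perform checks ... */
	dsl_dataset_long_hold(ds, FTAG);
	dsl_pool_rele(dp, FTAG);

	/* ... long-running task; dp_config_rwlock is no longer held ... */

	dsl_dataset_long_rele(ds, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}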
int
dsl_pool_hold(const char *name, const void *tag, dsl_pool_t **dp)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, tag);
if (error == 0) {
*dp = spa_get_dsl(spa);
dsl_pool_config_enter(*dp, tag);
}
return (error);
}
void
dsl_pool_rele(dsl_pool_t *dp, const void *tag)
{
dsl_pool_config_exit(dp, tag);
spa_close(dp->dp_spa, tag);
}
void
dsl_pool_config_enter(dsl_pool_t *dp, const void *tag)
{
/*
* We use a "reentrant" reader-writer lock, but not reentrantly.
*
* The rrwlock can (with the track_all flag) track all reading threads,
* which is very useful for debugging which code path failed to release
* the lock, and for verifying that the *current* thread does hold
* the lock.
*
* (Unlike a rwlock, which knows that N threads hold it for
* read, but not *which* threads, so rw_held(RW_READER) returns TRUE
* if any thread holds it for read, even if this thread doesn't).
*/
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}
void
dsl_pool_config_enter_prio(dsl_pool_t *dp, const void *tag)
{
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}
void
dsl_pool_config_exit(dsl_pool_t *dp, const void *tag)
{
rrw_exit(&dp->dp_config_rwlock, tag);
}
boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}
boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, UINT, ZMOD_RD,
"Max percent of RAM allowed to be dirty");
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, UINT, ZMOD_RD,
"zfs_dirty_data_max upper bound as % of RAM");
ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, UINT, ZMOD_RW,
"Transaction delay threshold");
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, U64, ZMOD_RW,
"Determines the dirty space limit");
ZFS_MODULE_PARAM(zfs, zfs_, wrlog_data_max, U64, ZMOD_RW,
"The size limit of write-transaction zil log data");
/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, U64, ZMOD_RD,
"zfs_dirty_data_max upper bound in bytes");
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, UINT, ZMOD_RW,
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");
ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, U64, ZMOD_RW,
"How quickly delay approaches infinity");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
"Max percent of CPUs that are used per dp_sync_taskq");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
"Number of taskq entries that are pre-populated");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
"Max number of taskq entries that are cached");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 3eba4cb35cc6..5977f8c82b45 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1,5347 +1,5352 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2016 Gary Mills
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#include <sys/dbuf.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/*
* Grand theory statement on scan queue sorting
*
* Scanning is implemented by recursively traversing all indirection levels
* in an object and reading all blocks referenced from said objects. This
* results in us approximately traversing the object from lowest logical
* offset to the highest. For best performance, we would want the logical
* blocks to be physically contiguous. However, this is frequently not the
* case with pools given the allocation patterns of copy-on-write filesystems.
* So instead, we put the I/Os into a reordering queue and issue them in a
* way that will most benefit physical disks (LBA-order).
*
* Queue management:
*
* Ideally, we would want to scan all metadata and queue up all block I/O
* prior to starting to issue it, because that allows us to do an optimal
* sorting job. This can however consume large amounts of memory. Therefore
* we continuously monitor the size of the queues and constrain them to 5%
* (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
* limit, we clear out a few of the largest extents at the head of the queues
* to make room for more scanning. Hopefully, these extents will be fairly
* large and contiguous, allowing us to approach sequential I/O throughput
* even without a fully sorted tree.
*
* Metadata scanning takes place in dsl_scan_visit(), which is called from
* dsl_scan_sync() every spa_sync(). If we have either fully scanned all
* metadata on the pool, or we need to make room in memory because our
* queues are too large, dsl_scan_visit() is postponed and
* scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
* that metadata scanning and queued I/O issuing are mutually exclusive. This
* allows us to provide maximum sequential I/O throughput for the majority of
* I/O's issued since sequential I/O performance is significantly negatively
* impacted if it is interleaved with random I/O.
*
* Implementation Notes
*
* One side effect of the queued scanning algorithm is that the scanning code
* needs to be notified whenever a block is freed. This is needed to allow
* the scanning code to remove these I/Os from the issuing queue. Additionally,
* we do not attempt to queue gang blocks to be issued sequentially since this
* is very hard to do and would have an extremely limited performance benefit.
* Instead, we simply issue gang I/Os as soon as we find them using the legacy
* algorithm.
*
* Backwards compatibility
*
* This new algorithm is backwards compatible with the legacy on-disk data
* structures (and therefore does not require a new feature flag).
* Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
* will stop scanning metadata (in logical order) and wait for all outstanding
* sorted I/O to complete. Once this is done, we write out a checkpoint
* bookmark, indicating that we have scanned everything logically before it.
* If the pool is imported on a machine without the new sorting algorithm,
* the scan simply resumes from the last checkpoint using the legacy algorithm.
*/
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
const zbookmark_phys_t *);
static scan_cb_t dsl_scan_scrub_cb;
static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(spa_t *spa);
static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);
extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
/*
* 'zpool status' uses bytes processed per pass to report throughput and
* estimate time remaining. We define a pass to start when the scanning
* phase completes for a sequential resilver. Optionally, this value
* may be used to reset the pass statistics every N txgs to provide an
* estimated completion time based on currently observed performance.
*/
static uint_t zfs_scan_report_txgs = 0;
/*
* By default zfs will check to ensure it is not over the hard memory
* limit before each txg. If finer-grained control of this is needed
* this value can be set to 1 to enable checking before scanning each
* block.
*/
static int zfs_scan_strict_mem_lim = B_FALSE;
/*
* Maximum number of in-flight bytes per leaf vdev. We attempt
* to strike a balance here between keeping the vdev queues full of I/Os
* at all times and not overflowing the queues to cause long latency,
* which would cause long txg sync times. No matter what, we will not
* overload the drives with I/O, since that is protected by
* zfs_vdev_scrub_max_active.
*/
static uint64_t zfs_scan_vdev_limit = 16 << 20;
static uint_t zfs_scan_issue_strategy = 0;
/* don't queue & sort zios, go direct */
static int zfs_scan_legacy = B_FALSE;
static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
/* fraction of physmem */
static uint_t zfs_scan_mem_lim_fact = 20;
/* fraction of mem lim above */
static uint_t zfs_scan_mem_lim_soft_fact = 20;
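Putting the four memory-limit tunables above together: the derivation below follows the "5% of physmem" description in the block comment earlier and the dsl_scan_should_clear() reference, which is outside this hunk, so treat the exact rounding as an approximation.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical machine with 64 GiB of RAM. */
	uint64_t physmem_bytes = 64ULL << 30;
	uint64_t lim_fact = 20, soft_fact = 20;		/* tunables above */
	uint64_t lim_min = 16ULL << 20, soft_max = 128ULL << 20;

	uint64_t hard = physmem_bytes / lim_fact;	/* ~5% of RAM */
	if (hard < lim_min)
		hard = lim_min;
	uint64_t soft = hard / soft_fact;
	if (soft > soft_max)
		soft = soft_max;

	printf("hard limit: %llu MiB\n", (unsigned long long)(hard >> 20));
	printf("soft limit: %llu MiB\n", (unsigned long long)(soft >> 20));
	return (0);
}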
/* minimum milliseconds to scrub per txg */
static uint_t zfs_scrub_min_time_ms = 1000;
/* minimum milliseconds to obsolete per txg */
static uint_t zfs_obsolete_min_time_ms = 500;
/* minimum milliseconds to free per txg */
static uint_t zfs_free_min_time_ms = 1000;
/* minimum milliseconds to resilver per txg */
static uint_t zfs_resilver_min_time_ms = 3000;
static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const ddt_class_t zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
static uint64_t zfs_max_async_dedup_frees = 100000;
/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;
/* Don't defer a resilver if the one in progress only got this far: */
static uint_t zfs_resilver_defer_percent = 10;
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
* Unfortunately, it is a bit difficult to determine exactly how long
* this will take since userspace will trigger fs mounts asynchronously
* and the kernel will create zvol minors asynchronously. As a result,
* the value provided here is a bit arbitrary, but represents a
* reasonable estimate of how many txgs it will take to finish fully
* importing a pool
*/
#define SCAN_IMPORT_WAIT_TXGS 5
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
#define DSL_SCAN_IS_SCRUB(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB)
/*
* Enable/disable the processing of the free_bpobj object.
*/
static int zfs_free_bpobj_enabled = 1;
/* Error blocks to be scrubbed in one txg. */
static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
NULL,
dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
uint64_t sds_dsobj;
uint64_t sds_txg;
avl_node_t sds_node;
} scan_ds_t;
/*
* This controls what conditions are placed on dsl_scan_sync_state():
* SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
* SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
* SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
* See dsl_scan_sync_state for details.
*/
typedef enum {
SYNC_OPTIONAL,
SYNC_MANDATORY,
SYNC_CACHED
} state_sync_type_t;
/*
* This struct represents the minimum information needed to reconstruct a
* zio for sequential scanning. This is useful because many of these will
* accumulate in the sequential IO queues before being issued, so saving
* memory matters here.
*/
typedef struct scan_io {
/* fields from blkptr_t */
uint64_t sio_blk_prop;
uint64_t sio_phys_birth;
uint64_t sio_birth;
zio_cksum_t sio_cksum;
uint32_t sio_nr_dvas;
/* fields from zio_t */
uint32_t sio_flags;
zbookmark_phys_t sio_zb;
/* members for queue sorting */
union {
avl_node_t sio_addr_node; /* link into issuing queue */
list_node_t sio_list_node; /* link for issuing to disk */
} sio_nodes;
/*
* There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
* depending on how many were in the original bp. Only the
* first DVA is really used for sorting and issuing purposes.
* The other DVAs (if provided) simply exist so that the zio
* layer can find additional copies to repair from in the
* event of an error. This array must go at the end of the
* struct to allow this for the variable number of elements.
*/
dva_t sio_dva[];
} scan_io_t;
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define SIO_GET_END_OFFSET(sio) \
(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define SIO_GET_MUSED(sio) \
(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
zio_t *q_zio; /* scn_zio_root child for waiting on IO */
/* trees used for sorting I/Os and extents of I/Os */
- range_tree_t *q_exts_by_addr;
+ zfs_range_tree_t *q_exts_by_addr;
zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused;
uint64_t q_last_ext_addr;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
uint64_t q_inflight_bytes;
kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
/* per txg statistics */
uint64_t q_total_seg_size_this_txg;
uint64_t q_segs_this_txg;
uint64_t q_total_zio_size_this_txg;
uint64_t q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
zfs_refcount_t spc_refcnt; /* refcount for memory management */
dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
boolean_t spc_root; /* is this prefetch for an objset? */
uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */
} scan_prefetch_ctx_t;
/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
blkptr_t spic_bp; /* bp to prefetch */
zbookmark_phys_t spic_zb; /* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);
static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}
/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
ASSERT3U(nr_dvas, >, 0);
ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
void
scan_init(void)
{
/*
* This is used in ext_size_compare() to weight segments
* based on how sparse they are. This cannot be changed
* mid-scan and the tree comparison functions don't currently
* have a mechanism for passing additional context to the
* compare functions. Thus we store this value globally and
* we only allow it to be set at module initialization time.
*/
fill_weight = zfs_scan_fill_weight;
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
char name[36];
(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
sio_cache[i] = kmem_cache_create(name,
(sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
0, NULL, NULL, NULL, NULL, NULL, 0);
}
}
void
scan_fini(void)
{
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
kmem_cache_destroy(sio_cache[i]);
}
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
return (scn->scn_phys.scn_state == DSS_SCANNING);
}
boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
return (dsl_scan_is_running(dp->dp_scan) &&
dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
BP_SET_PHYSICAL_BIRTH(bp, sio->sio_phys_birth);
BP_SET_LOGICAL_BIRTH(bp, sio->sio_birth);
bp->blk_fill = 1; /* we always only work with data pointers */
bp->blk_cksum = sio->sio_cksum;
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
sio->sio_blk_prop = bp->blk_prop;
sio->sio_phys_birth = BP_GET_PHYSICAL_BIRTH(bp);
sio->sio_birth = BP_GET_LOGICAL_BIRTH(bp);
sio->sio_cksum = bp->blk_cksum;
sio->sio_nr_dvas = BP_GET_NDVAS(bp);
/*
* Copy the DVAs to the sio. We need all copies of the block so
* that the self healing code can use the alternate copies if the
* first is corrupted. We want the DVA at index dva_i to be first
* in the sio since this is the primary one that we want to issue.
*/
for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
}
}
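A tiny stand-alone sketch of the DVA rotation above: with a hypothetical three-DVA block and dva_i = 1, the copies land in the sio in the order 1, 2, 0, so the requested DVA is issued first while the others remain available for self-healing.

#include <stdio.h>

int
main(void)
{
	int nr_dvas = 3, dva_i = 1;	/* hypothetical block */

	/* Mirrors the index arithmetic in bp2sio(). */
	for (int i = 0, j = dva_i; i < nr_dvas; i++, j++)
		printf("sio_dva[%d] = blk_dva[%d]\n", i, j % nr_dvas);
	return (0);
}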
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
int err;
dsl_scan_t *scn;
spa_t *spa = dp->dp_spa;
uint64_t f;
scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
scn->scn_dp = dp;
/*
* It's possible that we're resuming a scan after a reboot so
* make sure that the scan_async_destroying flag is initialized
* appropriately.
*/
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
* Limits for the issuing phase are done per top-level vdev and
* are handled separately.
*/
scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
mutex_init(&scn->scn_queue_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
sizeof (scan_prefetch_issue_ctx_t),
offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_func", sizeof (uint64_t), 1, &f);
if (err == 0) {
/*
* There was an old-style scrub in progress. Restart a
* new-style scrub from the beginning.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("old-style scrub was in progress for %s; "
"restarting new-style scrub in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
/*
* Load the queue obj from the old location so that it
* can be freed by dsl_scan_done().
*/
(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);
if (err != 0 && err != ENOENT)
return (err);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
/*
* Detect if the pool contains the signature of #2094. If it
* does, properly update the scn->scn_phys structure and notify
* the administrator by setting an errata for the pool.
*/
if (err == EOVERFLOW) {
uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
(23 * sizeof (uint64_t)));
err = zap_lookup(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
if (err == 0) {
uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
if (overflow & ~DSL_SCAN_FLAGS_MASK ||
scn->scn_async_destroying) {
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
return (EOVERFLOW);
}
memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
/* Required scrub already in progress. */
if (scn->scn_phys.scn_state == DSS_FINISHED ||
scn->scn_phys.scn_state == DSS_CANCELED)
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_SCRUB;
}
}
if (err == ENOENT)
return (0);
else if (err)
return (err);
/*
* We might be restarting after a reboot, so jump the issued
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
scn->scn_issued_before_pass = scn->scn_phys.scn_examined -
scn->scn_phys.scn_skipped;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
/*
* A new-type scrub was in progress on an old
* pool, and the pool was accessed by old
* software. Restart from the beginning, since
* the old software may have changed the pool in
* the meantime.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("new-style scrub for %s was modified "
"by old software; restarting in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
} else if (dsl_scan_resilvering(dp)) {
/*
* If a resilver is in progress and there are already
* errors, restart it instead of finishing this scan and
* then restarting it. If there haven't been any errors
* then remember that the incore DTL is valid.
*/
if (scn->scn_phys.scn_errors > 0) {
scn->scn_restart_txg = txg;
zfs_dbgmsg("resilver can't excise DTL_MISSING "
"when finished; restarting on %s in txg "
"%llu",
spa->spa_name,
(u_longlong_t)scn->scn_restart_txg);
} else {
/* it's safe to excise DTL when finished */
spa->spa_scrub_started = B_TRUE;
}
}
}
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
for (zap_cursor_init(&zc, dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj);
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za->za_name, NULL),
za->za_first_integer);
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
}
ddt_walk_init(spa, scn->scn_phys.scn_max_txg);
spa_scan_stat_init(spa);
vdev_scan_stat_init(spa->spa_root_vdev);
return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
if (dp->dp_scan != NULL) {
dsl_scan_t *scn = dp->dp_scan;
if (scn->scn_taskq != NULL)
taskq_destroy(scn->scn_taskq);
scan_ds_queue_clear(scn);
avl_destroy(&scn->scn_queue);
mutex_destroy(&scn->scn_queue_lock);
scan_ds_prefetch_queue_clear(scn);
avl_destroy(&scn->scn_prefetch_queue);
kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
dp->dp_scan = NULL;
}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
return (scn->scn_restart_txg != 0 &&
scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
(spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
return (scn_phys->scn_state == DSS_SCANNING &&
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
{
dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;
return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
}
boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
{
return (dsl_errorscrubbing(scn->scn_dp) &&
scn->errorscrub_phys.dep_paused_flags);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
return (dsl_scan_scrubbing(scn->scn_dp) &&
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
scn->errorscrub_phys.dep_cursor =
zap_cursor_serialize(&scn->errorscrub_cursor);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
&scn->errorscrub_phys, tx));
}
static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(!dsl_errorscrubbing(scn->scn_dp));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
scn->errorscrub_phys.dep_func = *funcp;
scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
scn->errorscrub_phys.dep_start_time = gethrestime_sec();
scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
scn->errorscrub_phys.dep_examined = 0;
scn->errorscrub_phys.dep_errors = 0;
scn->errorscrub_phys.dep_cursor = 0;
zap_cursor_init_serialized(&scn->errorscrub_cursor,
spa->spa_meta_objset, spa->spa_errlog_last,
scn->errorscrub_phys.dep_cursor);
vdev_config_dirty(spa->spa_root_vdev);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);
dsl_errorscrub_sync_state(scn, tx);
spa_history_log_internal(spa, "error scrub setup", tx,
"func=%u mintxg=%u maxtxg=%llu",
*funcp, 0, (u_longlong_t)tx->tx_txg);
}
static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
return (SET_ERROR(EBUSY));
}
if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
return (ECANCELED);
}
return (0);
}
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we can be running in the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
* (scn_queues_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
* mandatory, we do a hard ASSERT to make sure that the queues are empty. The
* third possible state is a "cached" sync. This is done in response to:
* 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
* destroyed, so we wouldn't be able to restart scanning from it.
* 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
* superseded by a newer snapshot.
* 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
* swapped with its clone.
* In all cases, a cached sync simply rewrites the last record we've written,
* just slightly modified. For the modifications that are performed to the
* last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
* dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
*/
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
int i;
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
if (scn->scn_queues_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
if (q == NULL)
continue;
mutex_enter(&vd->vdev_scan_io_queue_lock);
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
NULL);
- ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
+ ASSERT3P(zfs_range_tree_first(q->q_exts_by_addr), ==,
+ NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
if (scn->scn_phys.scn_queue_obj != 0)
scan_ds_queue_sync(scn, tx);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
zfs_dbgmsg("finish scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_FALSE;
scn->scn_last_checkpoint = ddi_get_lbolt();
} else if (sync_type == SYNC_CACHED) {
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys_cached, tx));
}
}
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(EBUSY));
return (0);
}
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
setup_sync_arg_t *setup_sync_arg = (setup_sync_arg_t *)arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT3U(setup_sync_arg->func, >, POOL_SCAN_NONE);
ASSERT3U(setup_sync_arg->func, <, POOL_SCAN_FUNCS);
memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
/*
* If we are starting a fresh scrub, we erase the error scrub
* information from disk.
*/
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
dsl_errorscrub_sync_state(scn, tx);
scn->scn_phys.scn_func = setup_sync_arg->func;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = setup_sync_arg->txgstart;
if (setup_sync_arg->txgend == 0) {
scn->scn_phys.scn_max_txg = tx->tx_txg;
} else {
scn->scn_phys.scn_max_txg = setup_sync_arg->txgend;
}
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
scn->scn_issued_before_pass = 0;
scn->scn_restart_txg = 0;
scn->scn_done_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
vdev_scan_stat_init(spa->spa_root_vdev);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* rewrite all disk labels */
vdev_config_dirty(spa->spa_root_vdev);
if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_START);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
}
spa->spa_scrub_started = B_TRUE;
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
/*
* When starting a resilver clear any existing rebuild state.
* This is required to prevent stale rebuild status from
* being reported when a rebuild is run, then a resilver and
* finally a scrub, in which case only the scrub status
* should be reported by 'zpool status'.
*/
if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
vdev_rebuild_clear_sync(
(void *)(uintptr_t)vd->vdev_id, tx);
}
}
}
/* back to the generic stuff */
if (zfs_scan_blkstats) {
if (dp->dp_blkstats == NULL) {
dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
}
memset(&dp->dp_blkstats->zab_type, 0,
sizeof (dp->dp_blkstats->zab_type));
} else {
if (dp->dp_blkstats) {
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
dp->dp_blkstats = NULL;
}
}
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
ddt_walk_init(spa, scn->scn_phys.scn_max_txg);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
setup_sync_arg->func, (u_longlong_t)scn->scn_phys.scn_min_txg,
(u_longlong_t)scn->scn_phys.scn_max_txg);
}
/*
* Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
* error scrub or resilver. Can also be called to resume a paused scrub or
* error scrub.
*/
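/*
 * A minimal usage sketch: a full-pool scrub request would look like
 *
 * dsl_scan(dp, POOL_SCAN_SCRUB, 0, 0);
 *
 * where txgstart/txgend of 0 select the default range (scn_min_txg = 0,
 * scn_max_txg = the txg of the setup sync); a non-zero txg range is only
 * accepted for POOL_SCAN_SCRUB.
 */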
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
setup_sync_arg_t setup_sync_arg;
if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0)) {
return (EINVAL);
}
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
spa_vdev_state_enter(spa, SCL_NONE);
spa->spa_scrub_reopen = B_TRUE;
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
if (func == POOL_SCAN_RESILVER) {
dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
return (0);
}
if (func == POOL_SCAN_ERRORSCRUB) {
if (dsl_errorscrub_is_paused(dp->dp_scan)) {
/*
* got error scrub start cmd, resume paused error scrub.
*/
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_ERRORSCRUB_RESUME);
return (ECANCELED);
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
&func, 0, ZFS_SPACE_CHECK_RESERVED));
}
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (SET_ERROR(ECANCELED));
}
return (SET_ERROR(err));
}
setup_sync_arg.func = func;
setup_sync_arg.txgstart = txgstart;
setup_sync_arg.txgend = txgend;
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &setup_sync_arg, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
if (complete) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
spa_history_log_internal(spa, "error scrub done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
} else {
spa_history_log_internal(spa, "error scrub canceled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
}
scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa->spa_scrub_active = B_FALSE;
spa_errlog_rotate(spa);
scn->errorscrub_phys.dep_end_time = gethrestime_sec();
zap_cursor_fini(&scn->errorscrub_cursor);
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_errorscrubbing(scn->scn_dp));
}
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
static const char *old_names[] = {
"scrub_bookmark",
"scrub_ddt_bookmark",
"scrub_ddt_class_max",
"scrub_queue",
"scrub_min_txg",
"scrub_max_txg",
"scrub_func",
"scrub_errors",
NULL
};
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int i;
/* Remove any remnants of an old-style scrub. */
for (i = 0; old_names[i]; i++) {
(void) zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
}
if (scn->scn_phys.scn_queue_obj != 0) {
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = 0;
}
scan_ds_queue_clear(scn);
scan_ds_prefetch_queue_clear(scn);
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
/*
* If we were "restarted" from a stopped state, don't bother
* with anything else.
*/
if (!dsl_scan_is_running(scn)) {
ASSERT(!scn->scn_is_sorted);
return;
}
if (scn->scn_is_sorted) {
scan_io_queues_destroy(scn);
scn->scn_is_sorted = B_FALSE;
if (scn->scn_taskq != NULL) {
taskq_destroy(scn->scn_taskq);
scn->scn_taskq = NULL;
}
}
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx)) {
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
} else if (!complete) {
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
} else {
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB(scn)) {
VERIFY0(zap_update(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LAST_SCRUBBED_TXG,
sizeof (uint64_t), 1,
&scn->scn_phys.scn_max_txg, tx));
spa->spa_scrubbed_last_txg = scn->scn_phys.scn_max_txg;
}
}
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
/*
* If the scrub/resilver completed, update all DTLs to
* reflect this. Whether it succeeded or not, vacate
* all temporary scrub DTLs.
*
* As the scrub does not currently support traversing
* data that have been freed but are part of a checkpoint,
* we don't mark the scrub as done in the DTLs as faults
* may still exist in those vdevs.
*/
if (complete &&
!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
if (scn->scn_phys.scn_min_txg) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_FINISH);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_SCRUB_FINISH);
}
} else {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
0, B_TRUE, B_FALSE);
}
spa_errlog_rotate(spa);
/*
* Don't clear flag until after vdev_dtl_reassess to ensure that
* DTL_MISSING will get updated when possible.
*/
spa->spa_scrub_started = B_FALSE;
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
/*
* Clear any resilver_deferred flags in the config.
* If there are drives that need resilvering, kick
* off an asynchronous request to start resilver.
* vdev_clear_resilver_deferred() may update the config
* before the resilver can restart. In the event of
* a crash during this period, the spa loading code
* will find the drives that need to be resilvered
* and start the resilver then.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
(u_longlong_t)spa_approx_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/* Clear recent error events (i.e. duplicate events tracking) */
if (complete)
zfs_ereport_clear(spa, NULL);
}
scn->scn_phys.scn_end_time = gethrestime_sec();
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_scan_is_running(scn));
}
static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/*
* can't pause an error scrub when there is no in-progress
* error scrub.
*/
if (!dsl_errorscrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused error scrub */
if (dsl_errorscrub_is_paused(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
scn->errorscrub_phys.dep_paused_flags = B_TRUE;
dsl_errorscrub_sync_state(scn, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_errorscrub_is_paused(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the error scrub
* rate shown in the output of 'zpool status'.
*/
spa->spa_scan_pass_errorscrub_spent_paused +=
gethrestime_sec() -
spa->spa_scan_pass_errorscrub_pause;
spa->spa_scan_pass_errorscrub_pause = 0;
scn->errorscrub_phys.dep_paused_flags = B_FALSE;
zap_cursor_init_serialized(
&scn->errorscrub_cursor,
spa->spa_meta_objset, spa->spa_errlog_last,
scn->errorscrub_phys.dep_cursor);
dsl_errorscrub_sync_state(scn, tx);
}
}
}
static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
/* can't cancel an error scrub when there is none in progress */
if (!dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_errorscrub_done(scn, B_FALSE, tx);
dsl_errorscrub_sync_state(scn, tx);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
ESC_ZFS_ERRORSCRUB_ABORT);
}
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
if (dsl_errorscrubbing(dp)) {
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
if (!dsl_scan_scrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused scrub */
if (dsl_scan_is_paused_scrub(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
spa_notify_waiters(spa);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the scrub rate
* shown in the output of 'zpool status'
*/
spa->spa_scan_pass_scrub_spent_paused +=
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
}
/*
* Set scrub pause/resume state if it makes sense to do so
*/
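/*
 * A minimal usage sketch, assuming the usual 'zpool scrub -p' (pause) and
 * 'zpool scrub' (resume) command paths end up here:
 *
 * dsl_scrub_set_pause_resume(dp, POOL_SCRUB_PAUSE);
 * dsl_scrub_set_pause_resume(dp, POOL_SCRUB_NORMAL);
 *
 * The _check callbacks reject a pause when no scrub is in progress (ENOENT)
 * or when it is already paused (EBUSY), and any command other than
 * PAUSE/NORMAL with ENOTSUP.
 */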
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
if (dsl_errorscrubbing(dp)) {
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_pause_resume_check,
dsl_errorscrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
/* start a new scan, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
if (txg == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;
dmu_tx_commit(tx);
} else {
dp->dp_scan->scn_restart_txg = txg;
}
zfs_dbgmsg("restarting resilver for %s at txg=%llu",
dp->dp_spa->spa_name, (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
zio_free(dp->dp_spa, txg, bp);
}
void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
ASSERT(dsl_pool_sync_context(dp));
zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
static int
scan_ds_queue_compare(const void *a, const void *b)
{
const scan_ds_t *sds_a = a, *sds_b = b;
if (sds_a->sds_dsobj < sds_b->sds_dsobj)
return (-1);
if (sds_a->sds_dsobj == sds_b->sds_dsobj)
return (0);
return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
void *cookie = NULL;
scan_ds_t *sds;
while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
kmem_free(sds, sizeof (*sds));
}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
if (sds != NULL && txg != NULL)
*txg = sds->sds_txg;
return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
scan_ds_t *sds;
avl_index_t where;
sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
sds->sds_dsobj = dsobj;
sds->sds_txg = txg;
VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
VERIFY(sds != NULL);
avl_remove(&scn->scn_queue, sds);
kmem_free(sds, sizeof (*sds));
}
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
ASSERT0(scn->scn_queues_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
DMU_OT_NONE, 0, tx);
for (scan_ds_t *sds = avl_first(&scn->scn_queue);
sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
VERIFY0(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
sds->sds_txg, tx));
}
}
/*
* Computes the memory limit state that we're currently in. A sorted scan
* needs quite a bit of memory to hold the sorting queue, so we need to
* reasonably constrain the size so it doesn't impact overall system
* performance. We compute two limits:
* 1) Hard memory limit: if the amount of memory used by the sorting
* queues on a pool gets above this value, we stop the metadata
* scanning portion and start issuing the queued up and sorted
* I/Os to reduce memory usage.
* This limit is calculated as a fraction of physmem (by default 5%).
* We constrain the lower bound of the hard limit to an absolute
* minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
* the upper bound to 5% of the total pool size - no chance we'll
* ever need that much memory, but just to keep the value in check.
* 2) Soft memory limit: once we hit the hard memory limit, we start
* issuing I/O to reduce queue memory usage, but we don't want to
* completely empty out the queues, since we might be able to find I/Os
* that will fill in the gaps of our non-sequential IOs at some point
* in the future. So we stop the issuing of I/Os once the amount of
* memory used drops below the soft limit (at which point we stop issuing
* I/O and start scanning metadata again).
*
* This limit is calculated by subtracting a fraction of the hard
* limit from the hard limit. By default this fraction is 5%, so
* the soft limit is 95% of the hard limit. We cap the size of the
* difference between the hard and soft limits at an absolute
* maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
* sufficient to not cause too frequent switching between the
* metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
* worth of queues is about 1.2 GiB of on-pool data, so scanning
* that should take at least a decent fraction of a second).
*/
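/*
 * A worked example with the defaults described above (5% hard fraction,
 * 16 MiB minimum, 5% soft fraction capped at 128 MiB), assuming 16 GiB of
 * physical memory and a pool large enough that the alloc/20 cap does not
 * apply:
 *
 * mlim_hard = MAX(16 GiB / 20, 16 MiB) = ~819 MiB
 * mlim_soft = mlim_hard - MIN(mlim_hard / 20, 128 MiB)
 * = ~819 MiB - ~41 MiB = ~778 MiB
 *
 * i.e. metadata scanning stops feeding the queues once they hold roughly
 * 819 MiB, and issuing continues until memory use drains back below
 * roughly 778 MiB.
 */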
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
uint64_t alloc, mlim_hard, mlim_soft, mused;
alloc = metaslab_class_get_alloc(spa_normal_class(spa));
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
zfs_scan_mem_lim_min);
mlim_hard = MIN(mlim_hard, alloc / 20);
mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
zfs_scan_mem_lim_soft_max);
mused = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
dsl_scan_io_queue_t *queue;
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
/*
* # of extents in exts_by_addr = # in exts_by_size.
* B-tree efficiency is ~75%, but can be as low as 50%.
*/
- mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
- ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
+ mused += zfs_btree_numnodes(&queue->q_exts_by_size) * ((
+ sizeof (zfs_range_seg_gap_t) + sizeof (uint64_t)) *
3 / 2) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
ASSERT0(scn->scn_queues_pending);
/*
* If we are above our hard limit, we need to clear out memory.
* If we are below our soft limit, we need to accumulate sequential IOs.
* Otherwise, we should keep doing whatever we are currently doing.
*/
if (mused >= mlim_hard)
return (B_TRUE);
else if (mused < mlim_soft)
return (B_FALSE);
else
return (scn->scn_clearing);
}
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
if (scn->scn_suspending)
return (B_TRUE); /* we're already suspending */
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 and objset blocks. */
if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
return (B_FALSE);
/*
* We suspend if:
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
* or
* - the scan queue has reached its memory use limit
*/
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
(zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn)) ||
!ddt_walk_ready(scn->scn_dp->dp_spa)) {
if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
dprintf("suspending at first available bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
zb->zb_objset, 0, 0, 0);
} else if (zb != NULL) {
dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
scn->scn_phys.scn_bookmark = *zb;
} else {
#ifdef ZFS_DEBUG
dsl_scan_phys_t *scnp = &scn->scn_phys;
dprintf("suspending at at DDT bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
}
scn->scn_suspending = B_TRUE;
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/*
* We suspend if:
* - we have scrubbed for at least the minimum time (default 1 sec
* for error scrub), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
*/
uint64_t curr_time_ns = gethrtime();
uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int mintime = zfs_scrub_min_time_ms;
if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
(txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa)) {
if (zb) {
dprintf("error scrub suspending at bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
}
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
} zil_scan_arg_t;
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
(void) zilog;
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) ||
BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* One block ("stubby") can be allocated a long time ago; we
* want to visit that one because it has been allocated
* (on-disk) even if it hasn't been claimed (even though for
* scrub there's nothing to do to it).
*/
if (claim_txg == 0 &&
BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(dp->dp_spa))
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
(void) zilog;
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
const lr_write_t *lr = (const lr_write_t *)lrc;
const blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) ||
BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* birth can be < claim_txg if this record's txg is
* already txg sync'ed (but this log block contains
* other records that are not synced)
*/
if (claim_txg == 0 || BP_GET_LOGICAL_BIRTH(bp) < claim_txg)
return (0);
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
ASSERT(spa_writeable(dp->dp_spa));
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0)
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg, B_FALSE);
zil_free(zilog);
}
/*
* We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
* here is to sort the AVL tree by the order each block will be needed.
*/
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
return (zbookmark_compare(spc_a->spc_datablkszsec,
spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
{
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
{
scan_prefetch_ctx_t *spc;
spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
zfs_refcount_create(&spc->spc_refcnt);
zfs_refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {
spc->spc_datablkszsec = dnp->dn_datablkszsec;
spc->spc_indblkshift = dnp->dn_indblkshift;
spc->spc_root = B_FALSE;
} else {
spc->spc_datablkszsec = 0;
spc->spc_indblkshift = 0;
spc->spc_root = B_TRUE;
}
return (spc);
}
static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
{
zfs_refcount_add(&spc->spc_refcnt, tag);
}
static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
void *cookie = NULL;
scan_prefetch_issue_ctx_t *spic = NULL;
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
&cookie)) != NULL) {
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
const zbookmark_phys_t *zb)
{
zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
dnode_phys_t tmp_dnp;
dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
if (zb->zb_objset != last_zb->zb_objset)
return (B_TRUE);
if ((int64_t)zb->zb_object < 0)
return (B_FALSE);
tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
if (zbookmark_subtree_completed(dnp, zb, last_zb))
return (B_TRUE);
return (B_FALSE);
}
static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
avl_index_t idx;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
return;
if (BP_IS_HOLE(bp) ||
BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET))
return;
if (dsl_scan_check_prefetch_resume(spc, zb))
return;
scan_prefetch_ctx_add_ref(spc, scn);
spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
spic->spic_spc = spc;
spic->spic_bp = *bp;
spic->spic_zb = *zb;
/*
* Add the IO to the queue of blocks to prefetch. This allows us to
* prioritize blocks that we will need first for the main traversal
* thread.
*/
mutex_enter(&spa->spa_scrub_lock);
if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
/* this block is already queued for prefetch */
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
scan_prefetch_ctx_rele(spc, scn);
mutex_exit(&spa->spa_scrub_lock);
return;
}
avl_insert(&scn->scn_prefetch_queue, spic, idx);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int i;
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
return;
SET_BOOKMARK(&zb, objset, object, 0, 0);
spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
for (i = 0; i < dnp->dn_nblkptr; i++) {
zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
zb.zb_blkid = i;
dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zb.zb_level = 0;
zb.zb_blkid = DMU_SPILL_BLKID;
dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
}
scan_prefetch_ctx_rele(spc, FTAG);
}
static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
(void) zio;
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
/* broadcast that the IO has completed for rate limiting purposes */
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
/* if there was an error or we are done prefetching, just cleanup */
if (buf == NULL || scn->scn_prefetch_stop)
goto out;
if (BP_GET_LEVEL(bp) > 0) {
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t czb;
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1, zb->zb_blkid * epb + i);
dsl_scan_prefetch(spc, cbp, &czb);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_prefetch_dnode(scn, cdnp,
zb->zb_objset, zb->zb_blkid * epb + i);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
objset_phys_t *osp = buf->b_data;
dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
if (OBJSET_BUF_HAS_PROJECTUSED(buf)) {
dsl_scan_prefetch_dnode(scn,
&osp->os_projectused_dnode, zb->zb_objset,
DMU_PROJECTUSED_OBJECT);
}
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
dsl_scan_prefetch_dnode(scn,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
out:
if (buf != NULL)
arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_t *scn = arg;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
/* loop until we are told to stop */
while (!scn->scn_prefetch_stop) {
arc_flags_t flags = ARC_FLAG_NOWAIT |
ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
mutex_enter(&spa->spa_scrub_lock);
/*
* Wait until we have an IO to issue and are not above our
* maximum in flight limit.
*/
while (!scn->scn_prefetch_stop &&
(avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
}
/* recheck if we should stop since we waited for the cv */
if (scn->scn_prefetch_stop) {
mutex_exit(&spa->spa_scrub_lock);
break;
}
/* remove the prefetch IO from the tree */
spic = avl_first(&scn->scn_prefetch_queue);
spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
avl_remove(&scn->scn_prefetch_queue, spic);
mutex_exit(&spa->spa_scrub_lock);
if (BP_IS_PROTECTED(&spic->spic_bp)) {
ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
zio_flags |= ZIO_FLAG_RAW;
}
/* We don't need data L1 buffer since we do not prefetch L0. */
blkptr_t *bp = &spic->spic_bp;
if (BP_GET_LEVEL(bp) == 1 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET)
flags |= ARC_FLAG_NO_BUF;
/* issue the prefetch asynchronously */
(void) arc_read(scn->scn_zio_root, spa, bp,
dsl_scan_prefetch_cb, spic->spic_spc, ZIO_PRIORITY_SCRUB,
zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT(scn->scn_prefetch_stop);
/* free any prefetches we didn't get to complete */
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
avl_remove(&scn->scn_prefetch_queue, spic);
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
/*
* We never skip over user/group accounting objects (obj<0)
*/
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
(int64_t)zb->zb_object >= 0) {
/*
* If we already visited this bp & everything below (in
* a prior txg sync), don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb,
&scn->scn_phys.scn_bookmark))
return (B_TRUE);
/*
* If we found the block we're trying to resume from, or
* we went past it, zero it out to indicate that it's OK
* to start checking for suspending again.
*/
if (zbookmark_subtree_tbd(dnp, zb,
&scn->scn_phys.scn_bookmark)) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
}
static void dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
/*
* Return nonzero on i/o error.
* Return new buf to write out in *bufp.
*/
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
ASSERT(!BP_IS_REDACTED(bp));
/*
* There is an unlikely case of encountering dnodes with contradicting
* dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag in files created
* or modified before commit 4254acb was merged. As it is not possible
* to know which of the two is correct, report an error.
*/
if (dnp != NULL &&
dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp));
return (SET_ERROR(EINVAL));
}
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
dsl_scan_visitbp(cbp, &czb, dnp,
ds, scn, ostype, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
arc_buf_t *buf;
if (BP_IS_PROTECTED(bp)) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
zio_flags |= ZIO_FLAG_RAW;
}
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_visitdnode(scn, ds, ostype,
cdnp, zb->zb_blkid * epb + i, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
osp = buf->b_data;
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
* We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_userused_dnode,
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
} else if (!zfs_blkptr_verify(spa, bp,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
/*
* Sanity check the block pointer contents, this is handled
* by arc_read() for the cases above.
*/
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp));
return (SET_ERROR(EINVAL));
}
return (0);
}
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
dmu_objset_type_t ostype, dnode_phys_t *dnp,
uint64_t object, dmu_tx_t *tx)
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
dnp->dn_nlevels - 1, j);
dsl_scan_visitbp(&dnp->dn_blkptr[j],
&czb, dnp, ds, scn, ostype, tx);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
0, DMU_SPILL_BLKID);
dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
&czb, dnp, ds, scn, ostype, tx);
}
}
/*
* The arguments are in this order because mdb can only print the
* first 5; we want them to be useful.
*/
static void
dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
if (dsl_scan_check_suspend(scn, zb))
return;
if (dsl_scan_check_resume(scn, dnp, zb))
return;
scn->scn_visited_this_txg++;
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
}
if (BP_IS_REDACTED(bp)) {
ASSERT(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
return;
}
/*
* Check if this block contradicts any filesystem flags.
*/
spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
if (f != SPA_FEATURE_NONE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
if (BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
if (dsl_scan_recurse(scn, ds, ostype, dnp, bp, zb, tx) != 0)
return;
/*
* If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
return;
}
/*
* If this block is from the future (after cur_max_txg), then we
* are doing this on behalf of a deleted snapshot, and we will
* revisit the future block on the next pass of this dataset.
* Don't scan it now unless we need to because something
* under it was modified.
*/
if (BP_GET_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
return;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
}
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
dmu_tx_t *tx)
{
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
SET_BOOKMARK(&scn->scn_prefetch_bookmark,
zb.zb_objset, 0, 0, 0);
} else {
scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
}
scn->scn_objsets_visited_this_txg++;
spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
dsl_scan_prefetch(spc, bp, &zb);
scan_prefetch_ctx_rele(spc, FTAG);
dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
dprintf_ds(ds, "finished scan%s", "");
}
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
if (ds->ds_is_snapshot) {
/*
* Note:
* - scn_cur_{min,max}_txg stays the same.
* - Setting the flag is not really necessary if
* scn_cur_max_txg == scn_max_txg, because there
* is nothing after this snapshot that we care
* about. However, we set it anyway and then
* ignore it when we retraverse it in
* dsl_scan_visitds().
*/
scn_phys->scn_bookmark.zb_objset =
dsl_dataset_phys(ds)->ds_next_snap_obj;
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
} else {
SET_BOOKMARK(&scn_phys->scn_bookmark,
ZB_DESTROYED_OBJSET, 0, 0, 0);
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset bookmark to -1,0,0,0",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name);
}
}
}
/*
* Invoked when a dataset is destroyed. We need to make sure that:
*
* 1) If it is the dataset that is currently being scanned, we write
* a new dsl_scan_phys_t and mark the objset reference in it
* as destroyed.
* 2) Remove it from the work queue, if it was present.
*
* If the dataset was actually a snapshot, instead of marking the dataset
* as destroyed, we instead substitute the next snapshot in line.
*/
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_destroyed_scn_phys(ds, &scn->scn_phys);
ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
if (ds->ds_is_snapshot)
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
if (ds->ds_is_snapshot) {
/*
* We keep the same mintxg; it could be >
* ds_creation_txg if the previous snapshot was
* deleted too.
*/
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
mintxg, tx) == 0);
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
} else {
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"removing",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name);
}
}
/*
* dsl_scan_sync() should be called after this, and should sync
* out our changed state, but just to be safe, do it here.
*/
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds->ds_object) {
scn_bookmark->zb_objset =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
}
/*
* Called when a dataset is snapshotted. If we were currently traversing
* this snapshot, we reset our bookmark to point at the newly created
* snapshot. We also modify our work queue to remove the old snapshot and
* replace with the new one.
*/
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds1->ds_object) {
scn_bookmark->zb_objset = ds2->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds1->ds_object,
ds1->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (scn_bookmark->zb_objset == ds2->ds_object) {
scn_bookmark->zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds2->ds_object,
ds2->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
}
/*
* Called when an origin dataset and its clone are swapped. If we were
* currently traversing the dataset, we need to switch to traversing the
* newly promoted clone.
*/
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds1->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg1, mintxg2;
boolean_t ds1_queued, ds2_queued;
if (!dsl_scan_is_running(scn))
return;
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
/*
* Handle the in-memory scan queue.
*/
ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* The swapping code below would not handle this case correctly,
* since we can't insert ds2 if it is already there. That's
* because scan_ds_queue_insert() prohibits a duplicate insert
* and panics.
*/
} else if (ds1_queued) {
scan_ds_queue_remove(scn, ds1->ds_object);
scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
} else if (ds2_queued) {
scan_ds_queue_remove(scn, ds2->ds_object);
scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
}
/*
* Handle the on-disk scan queue.
* The on-disk state is an out-of-date version of the in-memory state,
* so the in-memory and on-disk values for ds1_queued and ds2_queued may
* be different. Therefore we need to apply the swap logic to the
* on-disk state independently of the in-memory state.
*/
ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* Alternatively, we could check for EEXIST from
* zap_add_int_key() and back out to the original state, but
* that would be more work than checking for this case upfront.
*/
} else if (ds1_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds1->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (ds2_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds2->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
uint64_t originobj = *(uint64_t *)arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
return (0);
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (err)
return (err);
ds = prev;
}
mutex_enter(&scn->scn_queue_lock);
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
mutex_exit(&scn->scn_queue_lock);
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (scn->scn_phys.scn_cur_min_txg >=
scn->scn_phys.scn_max_txg) {
/*
* This can happen if this snapshot was created after the
* scan started, and we already completed a previous snapshot
* that was created after the scan started. This snapshot
* only references blocks with:
*
* birth < our ds_creation_txg
* cur_min_txg is no less than ds_creation_txg.
* We have already visited these blocks.
* or
* birth > scn_max_txg
* The scan requested not to visit these blocks.
*
* Subsequent snapshots (and clones) can reference our
* blocks, or blocks with even higher birth times.
* Therefore we do not need to visit them either,
* so we do not add them to the work queue.
*
* Note that checking for cur_min_txg >= cur_max_txg
* is not sufficient, because in that case we may need to
* visit subsequent snapshots. This happens when min_txg > 0,
* which raises cur_min_txg. In this case we will visit
* this dataset but skip all of its blocks, because the
* rootbp's birth time is < cur_min_txg. Then we will
* add the next snapshots/clones to the work queue.
*/
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
"cur_min_txg (%llu) >= max_txg (%llu)",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_max_txg);
kmem_free(dsname, MAXNAMELEN);
goto out;
}
/*
* Only the ZIL in the head (non-snapshot) is valid. Even though
* snapshots can have ZIL block pointers (which may be the same
* BP as in the head), they must be ignored. In addition, $ORIGIN
* doesn't have an objset (i.e. its ds_bp is a hole) so we don't
* need to look for a ZIL in it either. So we traverse the ZIL here,
* rather than in scan_recurse(), because the regular snapshot
* block-sharing rules don't apply to it.
*/
if (!dsl_dataset_is_snapshot(ds) &&
(dp->dp_origin_snap == NULL ||
ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
objset_t *os;
if (dmu_objset_from_ds(ds, &os) != 0) {
goto out;
}
dsl_scan_zil(dp, &os->os_zil_header);
}
/*
* Iterate over the bps in this ds.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_cur_max_txg,
(int)scn->scn_suspending);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
if (scn->scn_suspending)
goto out;
/*
* We've finished this pass over this dataset.
*/
/*
* If we did not completely visit this dataset, do another pass.
*/
if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
zfs_dbgmsg("incomplete pass on %s; visiting again",
dp->dp_spa->spa_name);
scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
scan_ds_queue_insert(scn, ds->ds_object,
scn->scn_phys.scn_cur_max_txg);
goto out;
}
/*
* Add descendant datasets to work queue.
*/
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj,
dsl_dataset_phys(ds)->ds_creation_txg);
}
if (dsl_dataset_phys(ds)->ds_num_children > 1) {
boolean_t usenext = B_FALSE;
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count;
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
int err = zap_count(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
if (err == 0 &&
count == dsl_dataset_phys(ds)->ds_num_children - 1)
usenext = B_TRUE;
}
if (usenext) {
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za->za_name, NULL),
dsl_dataset_phys(ds)->ds_creation_txg);
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
} else {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_clones_cb, &ds->ds_object,
DS_FIND_CHILDREN));
}
}
out:
dsl_dataset_rele(ds, FTAG);
}
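/*
 * dmu_objset_find_dp() callback that seeds the scan queue with each head
 * dataset's oldest snapshot (or the head itself if it has none). If we hit
 * a clone boundary while walking back (the previous snapshot does not point
 * at us), the dataset is a clone and is skipped for now.
 */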
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
(void) arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(prev, FTAG);
return (0);
}
dsl_dataset_rele(ds, FTAG);
ds = prev;
}
mutex_enter(&scn->scn_queue_lock);
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
mutex_exit(&scn->scn_queue_lock);
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_t *ddt, ddt_lightweight_entry_t *ddlwe, dmu_tx_t *tx)
{
(void) tx;
const ddt_key_t *ddk = &ddlwe->ddlwe_key;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
if (!dsl_scan_is_running(scn))
return;
/*
* This function is special because it is the only thing
* that can add scan_io_t's to the vdev scan queues from
* outside dsl_scan_sync(). For the most part this is ok
* as long as it is called from within syncing context.
* However, dsl_scan_sync() expects that no new sio's will
* be added between when all the work for a scan is done
* and the next txg when the scan is actually marked as
* completed. This check ensures we do not issue new sio's
* during this period.
*/
if (scn->scn_done_txg != 0)
return;
for (int p = 0; p < DDT_NPHYS(ddt); p++) {
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
uint64_t phys_birth = ddt_phys_birth(&ddlwe->ddlwe_phys, v);
if (phys_birth == 0 || phys_birth > scn->scn_phys.scn_max_txg)
continue;
ddt_bp_create(checksum, ddk, &ddlwe->ddlwe_phys, v, &bp);
scn->scn_visited_this_txg++;
scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
}
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (ddt_class_t)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_lightweight_entry_t ddlwe = {0};
int error;
uint64_t n = 0;
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &ddlwe)) == 0) {
ddt_t *ddt;
if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
break;
dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
(longlong_t)ddb->ddb_class,
(longlong_t)ddb->ddb_type,
(longlong_t)ddb->ddb_checksum,
(longlong_t)ddb->ddb_cursor);
/* There should be no pending changes to the dedup table */
ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
ASSERT(avl_first(&ddt->ddt_tree) == NULL);
dsl_scan_ddt_entry(scn, ddb->ddb_checksum, ddt, &ddlwe, tx);
n++;
if (dsl_scan_check_suspend(scn, NULL))
break;
}
if (error == EAGAIN) {
dsl_scan_check_suspend(scn, NULL);
error = 0;
zfs_dbgmsg("waiting for ddt to become ready for scan "
"on %s with class_max = %u; suspending=%u",
scn->scn_dp->dp_spa->spa_name,
(int)scn->scn_phys.scn_ddt_class_max,
(int)scn->scn_suspending);
} else
zfs_dbgmsg("scanned %llu ddt entries on %s with "
"class_max = %u; suspending=%u", (longlong_t)n,
scn->scn_dp->dp_spa->spa_name,
(int)scn->scn_phys.scn_ddt_class_max,
(int)scn->scn_suspending);
ASSERT(error == 0 || error == ENOENT);
ASSERT(error != ENOENT ||
ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
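/*
 * Upper bound on the birth txgs we must visit for this dataset: a snapshot
 * cannot reference blocks born after its creation txg, so clamp the scan's
 * max txg accordingly.
 */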
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
if (ds->ds_is_snapshot)
return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
return (smt);
}
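/*
 * Top-level traversal for one sync pass: finish any remaining DDT classes,
 * then the MOS and $ORIGIN (or, on very old pools, enqueue every dataset),
 * then resume a suspended dataset if there is one, and finally drain the
 * in-memory dataset queue until we suspend or run out of work.
 */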
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
scan_ds_t *sds;
dsl_pool_t *dp = scn->scn_dp;
if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
scn->scn_phys.scn_ddt_class_max) {
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_ddt(scn, tx);
if (scn->scn_suspending)
return;
}
if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
/* First do the MOS & ORIGIN */
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
if (scn->scn_suspending)
return;
if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_cb, NULL, DS_FIND_CHILDREN));
} else {
dsl_scan_visitds(scn,
dp->dp_origin_snap->ds_object, tx);
}
ASSERT(!scn->scn_suspending);
} else if (scn->scn_phys.scn_bookmark.zb_objset !=
ZB_DESTROYED_OBJSET) {
uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
/*
* If we were suspended, continue from here. Note if the
* ds we were suspended on was deleted, the zb_objset may
* be -1, so we will skip this and find a new objset
* below.
*/
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/*
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
* persistent zap-object-as-queue happen only at checkpoints.
*/
while ((sds = avl_first(&scn->scn_queue)) != NULL) {
dsl_dataset_t *ds;
uint64_t dsobj = sds->sds_dsobj;
uint64_t txg = sds->sds_txg;
/* dequeue and free the ds from the queue */
scan_ds_queue_remove(scn, dsobj);
sds = NULL;
/* set up min / max txg */
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (txg != 0) {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg, txg);
} else {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
}
scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
dsl_dataset_rele(ds, FTAG);
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/* No more objsets to fetch, we're done */
scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
ASSERT0(scn->scn_suspending);
}
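/*
 * Count the data (non-parity) disks across all top-level vdevs, excluding
 * logs, spares and L2ARC devices. Used to scale the pool-wide in-flight
 * byte limit for the metadata scanning phase.
 */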
static uint64_t
dsl_scan_count_data_disks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
continue;
leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
}
return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
int i;
uint64_t cur_size = 0;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
}
q->q_total_zio_size_this_txg += cur_size;
q->q_zios_this_txg++;
}
static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
uint64_t end)
{
q->q_total_seg_size_this_txg += end - start;
q->q_segs_this_txg++;
}
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
/* See comment in dsl_scan_check_suspend() */
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
/*
* Given a list of scan_io_t's in io_list, this issues the I/Os out to
* disk. This consumes the io_list and frees the scan_io_t's. This is
* called when emptying queues, either when we're up against the memory
* limit or when we have finished scanning. Returns B_TRUE if we stopped
* processing the list before we finished. Any sios that were not issued
* will remain in the io_list.
*/
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;
if (scan_io_queue_check_suspend(scn)) {
suspended = B_TRUE;
break;
}
sio2bp(sio, &bp);
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
sio_free(sio);
}
return (suspended);
}
/*
* This function removes sios from an IO queue which reside within a given
- * range_seg_t and inserts them (in offset order) into a list. Note that
+ * zfs_range_seg_t and inserts them (in offset order) into a list. Note that
* we only ever return a maximum of 32 sios at once. If there are more sios
* to process within this segment that did not make it onto the list, we
* return B_TRUE; otherwise B_FALSE.
*/
static boolean_t
-scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
+scan_io_queue_gather(dsl_scan_io_queue_t *queue, zfs_range_seg_t *rs,
+ list_t *list)
{
scan_io_t *srch_sio, *sio, *next_sio;
avl_index_t idx;
uint_t num_sios = 0;
int64_t bytes_issued = 0;
ASSERT(rs != NULL);
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
srch_sio = sio_alloc(1);
srch_sio->sio_nr_dvas = 1;
- SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
+ SIO_SET_OFFSET(srch_sio, zfs_rs_get_start(rs, queue->q_exts_by_addr));
/*
* The exact start of the extent might not contain any matching zios,
* so if that's the case, examine the next one in the tree.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
- while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
+ while (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs,
queue->q_exts_by_addr) && num_sios <= 32) {
- ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
+ ASSERT3U(SIO_GET_OFFSET(sio), >=, zfs_rs_get_start(rs,
queue->q_exts_by_addr));
- ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
+ ASSERT3U(SIO_GET_END_OFFSET(sio), <=, zfs_rs_get_end(rs,
queue->q_exts_by_addr));
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
bytes_issued += SIO_GET_ASIZE(sio);
num_sios++;
list_insert_tail(list, sio);
sio = next_sio;
}
/*
* We limit the number of sios we process at once to 32 to avoid
* biting off more than we can chew. If we didn't take everything
* in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely.
*/
- if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
+ if (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs,
queue->q_exts_by_addr)) {
- range_tree_adjust_fill(queue->q_exts_by_addr, rs,
+ zfs_range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued);
- range_tree_resize_segment(queue->q_exts_by_addr, rs,
- SIO_GET_OFFSET(sio), rs_get_end(rs,
+ zfs_range_tree_resize_segment(queue->q_exts_by_addr, rs,
+ SIO_GET_OFFSET(sio), zfs_rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
return (B_TRUE);
} else {
- uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
- uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
- range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
+ uint64_t rstart = zfs_rs_get_start(rs, queue->q_exts_by_addr);
+ uint64_t rend = zfs_rs_get_end(rs, queue->q_exts_by_addr);
+ zfs_range_tree_remove(queue->q_exts_by_addr, rstart, rend -
+ rstart);
queue->q_last_ext_addr = -1;
return (B_FALSE);
}
}
/*
* This is called from the queue emptying thread and selects the next
* extent from which we are to issue I/Os. The behavior of this function
* depends on the state of the scan, the current memory consumption and
* whether or not we are performing a scan shutdown.
* 1) We select extents in an elevator algorithm (LBA-order) if the scan
* needs to perform a checkpoint
* 2) We select the largest available extent if we are up against the
* memory limit.
* 3) Otherwise we don't select any extents.
*/
-static range_seg_t *
+static zfs_range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
- range_tree_t *rt = queue->q_exts_by_addr;
+ zfs_range_tree_t *rt = queue->q_exts_by_addr;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
if (!scn->scn_checkpointing && !scn->scn_clearing)
return (NULL);
/*
* During normal clearing, we want to issue our largest segments
* first, keeping IO as sequential as possible, and leaving the
* smaller extents for later with the hope that they might eventually
* grow to larger sequential segments. However, when the scan is
* checkpointing, no new extents will be added to the sorting queue,
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
zfs_scan_issue_strategy == 1)
- return (range_tree_first(rt));
+ return (zfs_range_tree_first(rt));
/*
* Try to continue previous extent if it is not completed yet. After
* shrink in scan_io_queue_gather() it may no longer be the best, but
* otherwise we leave shorter remnant every txg.
*/
uint64_t start;
uint64_t size = 1ULL << rt->rt_shift;
- range_seg_t *addr_rs;
+ zfs_range_seg_t *addr_rs;
if (queue->q_last_ext_addr != -1) {
start = queue->q_last_ext_addr;
- addr_rs = range_tree_find(rt, start, size);
+ addr_rs = zfs_range_tree_find(rt, start, size);
if (addr_rs != NULL)
return (addr_rs);
}
/*
* Nothing to continue, so find new best extent.
*/
uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
if (v == NULL)
return (NULL);
queue->q_last_ext_addr = start = *v << rt->rt_shift;
/*
* We need to get the original entry in the by_addr tree so we can
* modify it.
*/
- addr_rs = range_tree_find(rt, start, size);
+ addr_rs = zfs_range_tree_find(rt, start, size);
ASSERT3P(addr_rs, !=, NULL);
- ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
- ASSERT3U(rs_get_end(addr_rs, rt), >, start);
+ ASSERT3U(zfs_rs_get_start(addr_rs, rt), ==, start);
+ ASSERT3U(zfs_rs_get_end(addr_rs, rt), >, start);
return (addr_rs);
}
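/*
 * Taskq callback that drains a single top-level vdev's scan queue: fetch
 * extents in the order chosen by scan_io_queue_fetch_ext(), gather their
 * sios and issue them, until the queue is empty or we need to suspend.
 * Any sios gathered but not issued are re-inserted into the queue.
 */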
static void
scan_io_queues_run_one(void *arg)
{
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
scan_io_t *sio;
zio_t *zio;
list_t sio_list;
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
mutex_enter(q_lock);
queue->q_zio = zio;
/* Calculate maximum in-flight bytes for this vdev. */
queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
(vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
/* reset per-queue scan statistics for this txg */
queue->q_total_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_total_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
boolean_t more_left;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
do {
scan_io_t *first_sio, *last_sio;
/*
* We have selected which extent needs to be
* processed next. Gather up the corresponding sios.
*/
more_left = scan_io_queue_gather(queue, rs, &sio_list);
ASSERT(!list_is_empty(&sio_list));
first_sio = list_head(&sio_list);
last_sio = list_tail(&sio_list);
seg_end = SIO_GET_END_OFFSET(last_sio);
if (seg_start == 0)
seg_start = SIO_GET_OFFSET(first_sio);
/*
* Issuing sios can take a long time so drop the
* queue lock. The sio queue won't be updated by
* other threads since we're in syncing context so
* we can be sure that our trees will remain exactly
* as we left them.
*/
mutex_exit(q_lock);
suspended = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);
if (suspended)
break;
} while (more_left);
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
if (suspended)
break;
}
/*
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
while ((sio = list_remove_head(&sio_list)) != NULL)
scan_io_queue_insert_impl(queue, sio);
queue->q_zio = NULL;
mutex_exit(q_lock);
zio_nowait(zio);
list_destroy(&sio_list);
}
/*
* Performs an emptying run on all scan queues in the pool. This just
* punches out one thread per top-level vdev, each of which processes
* only that vdev's scan queue. We can parallelize the I/O here because
* we know that each queue's I/Os only affect its own top-level vdev.
*
* This function waits for the queue runs to complete, and must be
* called from dsl_scan_sync (or in general, syncing context).
*/
static void
scan_io_queues_run(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (scn->scn_queues_pending == 0)
return;
if (scn->scn_taskq == NULL) {
int nthreads = spa->spa_root_vdev->vdev_children;
/*
* We need to make this taskq *always* execute as many
* threads in parallel as we have top-level vdevs and no
* less, otherwise strange serialization of the calls to
* scan_io_queues_run_one can occur during spa_sync runs
* and that significantly impacts performance.
*/
scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
mutex_enter(&vd->vdev_scan_io_queue_lock);
if (vd->vdev_scan_io_queue != NULL) {
VERIFY(taskq_dispatch(scn->scn_taskq,
scan_io_queues_run_one, vd->vdev_scan_io_queue,
TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* Wait for the queues to finish issuing their IOs for this run
* before we return. There may still be IOs in flight at this
* point.
*/
taskq_wait(scn->scn_taskq);
}
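/*
 * Decide whether async block freeing should yield for this txg: pause once
 * we hit the per-txg block/dedup-free limits, run past zfs_txg_timeout,
 * exceed the minimum time while the txg is waiting to sync, or the pool is
 * shutting down. zfs_recover disables pausing entirely.
 */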
static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
uint64_t elapsed_nanosecs;
if (zfs_recover)
return (B_FALSE);
if (zfs_async_block_max_blocks != 0 &&
scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
return (B_TRUE);
}
if (zfs_max_async_dedup_frees != 0 &&
scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
return (B_TRUE);
}
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
txg_sync_waiting(scn->scn_dp)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
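/*
 * bpobj/bptree iteration callback for async destroys: free the block,
 * remove the freed space from dp_free_dir's accounting, and return ERESTART
 * once dsl_scan_async_block_should_pause() says we have done enough this
 * txg.
 */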
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_scan_t *scn = arg;
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
dmu_tx_get_txg(tx), bp, 0));
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
scn->scn_visited_this_txg++;
if (BP_GET_DEDUP(bp))
scn->scn_dedup_frees_this_txg++;
return (0);
}
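/*
 * Roll the per-queue segment and zio statistics gathered this txg up into
 * pool-wide totals and averages on the dsl_scan_t, for the debug messages
 * emitted from dsl_scan_sync().
 */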
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t i;
uint64_t seg_size_total = 0, zio_size_total = 0;
uint64_t seg_count_total = 0, zio_count_total = 0;
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
if (queue == NULL)
continue;
seg_size_total += queue->q_total_seg_size_this_txg;
zio_size_total += queue->q_total_zio_size_this_txg;
seg_count_total += queue->q_segs_this_txg;
zio_count_total += queue->q_zios_this_txg;
}
if (seg_count_total == 0 || zio_count_total == 0) {
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_zios_this_txg = 0;
return;
}
scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
scn->scn_segs_this_txg = seg_count_total;
scn->scn_zios_this_txg = zio_count_total;
}
static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (dsl_scan_free_block_cb(arg, bp, tx));
}
static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
dsl_scan_t *scn = arg;
const dva_t *dva = &bp->blk_dva[0];
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
DVA_GET_ASIZE(dva), tx);
scn->scn_visited_this_txg++;
return (0);
}
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t used = 0, comp, uncomp;
boolean_t clones_left;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
(scn->scn_async_destroying && !scn->scn_async_stalled))
return (B_TRUE);
if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
&used, &comp, &uncomp);
}
clones_left = spa_livelist_delete_check(spa);
return ((used != 0) || (clones_left));
}
boolean_t
dsl_errorscrub_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if (dsl_errorscrubbing(scn->scn_dp))
return (B_TRUE);
return (B_FALSE);
}
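/*
 * Walk the vdev tree below vd and return B_TRUE if at least one concrete
 * leaf has not had its resilver deferred, i.e. there is a device the
 * current scan may repair rather than leaving it to a deferred resilver.
 */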
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
boolean_t need_resilver = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++) {
need_resilver |=
dsl_scan_check_deferred(vd->vdev_child[c]);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (need_resilver);
if (!vd->vdev_resilver_deferred)
need_resilver = B_TRUE;
return (need_resilver);
}
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_t *vd;
vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops == &vdev_indirect_ops) {
/*
* The indirect vdev can point to multiple
* vdevs. For simplicity, always create
* the resilver zio_t. zio_vdev_io_start()
* will bypass the child resilver i/o's if
* they are on vdevs that don't have DTL's.
*/
return (B_TRUE);
}
if (DVA_GET_GANG(dva)) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
return (B_TRUE);
}
/*
* Check if the top-level vdev must resilver this offset.
* When the offset does not intersect with a dirty leaf DTL
* then it may be possible to skip the resilver IO. The psize
* is provided instead of asize to simplify the check for RAIDZ.
*/
if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
return (B_FALSE);
/*
* Check that this top-level vdev has a device under it which
* is resilvering and is not deferred.
*/
if (!dsl_scan_check_deferred(vd))
return (B_FALSE);
return (B_TRUE);
}
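/*
 * Process pending asynchronous destroys (the free bpobj, the async-destroy
 * bptree and the obsolete bpobj) before any scrub/resilver work. Returns 0
 * on completion, or an error (typically ERESTART) if the work was cut short
 * and should continue next txg.
 */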
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
int err = 0;
if (spa_suspend_async_destroy(spa))
return (0);
if (zfs_free_bpobj_enabled &&
spa_version(spa) >= SPA_VERSION_DEADLISTS) {
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bpobj_iterate(&dp->dp_free_bpobj,
bpobj_dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
}
if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
ASSERT(scn->scn_async_destroying);
scn->scn_is_bptree = B_TRUE;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bptree_iterate(dp->dp_meta_objset,
dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err == EIO || err == ECKSUM) {
err = 0;
} else if (err != 0 && err != ERESTART) {
zfs_panic_recover("error %u from "
"traverse_dataset_destroyed()", err);
}
if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
/* finished; deactivate async destroy feature */
spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
ASSERT(!spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY));
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, tx));
VERIFY0(bptree_free(dp->dp_meta_objset,
dp->dp_bptree_obj, tx));
dp->dp_bptree_obj = 0;
scn->scn_async_destroying = B_FALSE;
scn->scn_async_stalled = B_FALSE;
} else {
/*
* If we didn't make progress, mark the async
* destroy as stalled, so that we will not initiate
* a spa_sync() on its behalf. Note that we only
* check this if we are not finished, because if the
* bptree had no blocks for us to visit, we can
* finish without "making progress".
*/
scn->scn_async_stalled =
(scn->scn_visited_this_txg == 0);
}
}
if (scn->scn_visited_this_txg) {
zfs_dbgmsg("freed %llu blocks in %llums from "
"free_bpobj/bptree on %s in txg %llu; err=%u",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)
NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
spa->spa_name, (longlong_t)tx->tx_txg, err);
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
/*
* Write out changes to the DDT and the BRT that may be required
* as a result of the blocks freed. This ensures that the DDT
* and the BRT are clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
brt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
zfs_free_leak_on_eio &&
(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
/*
* We have finished background destroying, but there is still
* some space left in the dp_free_dir. Transfer this leaked
* space to the dp_leak_dir.
*/
if (dp->dp_leak_dir == NULL) {
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
LEAK_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
LEAK_DIR_NAME, &dp->dp_leak_dir));
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
-dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
}
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
!spa_livelist_delete_check(spa)) {
/* finished; verify that space accounting went to zero */
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
spa_notify_waiters(spa);
EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ));
if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_OBSOLETE_COUNTS));
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
err = bpobj_iterate(&dp->dp_obsolete_bpobj,
dsl_scan_obsolete_block_cb, scn, tx);
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
dsl_pool_destroy_obsolete_bpobj(dp, tx);
}
return (0);
}
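/*
 * Parsers for error log ZAP entry names: a full bookmark is encoded as
 * "<objset>:<object>:<level>:<blkid>", while entries used with the
 * head_errlog feature are keyed by a single dataset object number.
 */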
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
zb->zb_objset = zfs_strtonum(buf, &buf);
ASSERT(*buf == ':');
zb->zb_object = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == '\0');
}
static void
name_to_object(char *buf, uint64_t *obj)
{
*obj = zfs_strtonum(buf, &buf);
ASSERT(*buf == '\0');
}
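/*
 * Re-read a single logged error block during an error scrub. Look up its
 * block pointer with dbuf_dnode_findbp() and issue a scrub read; holes,
 * unloaded encryption keys and lookup failures are silently skipped.
 */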
static void
read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
objset_t *os;
if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
return;
if (dmu_objset_from_ds(ds, &os) != 0) {
dsl_dataset_rele(ds, FTAG);
return;
}
/*
* If the key is not loaded dbuf_dnode_findbp() will error out with
* EACCES. However in that case dnode_hold() will eventually call
* dbuf_read()->zio_wait() which may call spa_log_error(). This will
* lead to a deadlock due to us holding the mutex spa_errlist_lock.
* Avoid this by checking here if the keys are loaded, if not return.
* If the keys are not loaded the head_errlog feature is meaningless
* as we cannot figure out the birth txg of the block pointer.
*/
if (dsl_dataset_get_keystatus(ds->ds_dir) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
dsl_dataset_rele(ds, FTAG);
return;
}
dnode_t *dn;
blkptr_t bp;
if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
dsl_dataset_rele(ds, FTAG);
return;
}
rw_enter(&dn->dn_struct_rwlock, RW_READER);
int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
NULL);
if (error) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
return;
}
if (!error && BP_IS_HOLE(&bp)) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
return;
}
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;
/* If it's an intent log block, failure is expected. */
if (zb.zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
ASSERT(!BP_IS_EMBEDDED(&bp));
scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* We keep track of the scrubbed error blocks in "count". This will be used
* when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This
* function is modelled after check_filesystem().
*/
static int
scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep,
int *count)
{
dsl_dataset_t *ds;
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds);
if (error != 0)
return (error);
uint64_t latest_txg;
uint64_t txg_to_consider = spa->spa_syncing_txg;
boolean_t check_snapshot = B_TRUE;
error = find_birth_txg(ds, zep, &latest_txg);
/*
* If find_birth_txg() errors out, then err on the side of caution and
* proceed. In the worst case we scrub all objects. If zep->zb_birth
* is 0 (e.g. in case of encryption with unloaded keys) also proceed to
* scrub all objects.
*/
if (error == 0 && zep->zb_birth == latest_txg) {
/* Block neither freed nor rewritten. */
zbookmark_phys_t zb;
zep_to_zb(fs, zep, &zb);
scn->scn_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/* We have already acquired the config lock for spa */
read_by_block_level(scn, zb);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined++;
scn->errorscrub_phys.dep_to_examine--;
(*count)++;
if ((*count) == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, &zb)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EFAULT));
}
check_snapshot = B_FALSE;
} else if (error == 0) {
txg_to_consider = latest_txg;
}
/*
* Retrieve the number of snapshots if the dataset is not a snapshot.
*/
uint64_t snap_count = 0;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
error = zap_count(spa->spa_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
}
if (snap_count == 0) {
/* Filesystem without snapshots. */
dsl_dataset_rele(ds, FTAG);
return (0);
}
uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsl_dataset_rele(ds, FTAG);
/* Check only snapshots created from this file system. */
while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
snap_obj_txg <= txg_to_consider) {
error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
if (error != 0)
return (error);
if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsl_dataset_rele(ds, FTAG);
continue;
}
boolean_t affected = B_TRUE;
if (check_snapshot) {
uint64_t blk_txg;
error = find_birth_txg(ds, zep, &blk_txg);
/*
* Scrub the snapshot also when zb_birth == 0 or when
* find_birth_txg() returns an error.
*/
affected = (error == 0 && zep->zb_birth == blk_txg) ||
(error != 0) || (zep->zb_birth == 0);
}
/* Scrub snapshots. */
if (affected) {
zbookmark_phys_t zb;
zep_to_zb(snap_obj, zep, &zb);
scn->scn_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/* We have already acquired the config lock for spa */
read_by_block_level(scn, zb);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined++;
scn->errorscrub_phys.dep_to_examine--;
(*count)++;
if ((*count) == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, &zb)) {
dsl_dataset_rele(ds, FTAG);
return (EFAULT);
}
}
snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_rele(ds, FTAG);
}
return (0);
}
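/*
 * Syncing-context entry point for the error scrub: walk the persistent
 * error log cursor and re-read each logged block (and, with the head_errlog
 * feature, the affected filesystems and snapshots), suspending once the
 * per-txg block limit is reached or the scrub is paused.
 */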
void
dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
return;
}
if (dsl_scan_resilvering(scn->scn_dp)) {
/* cancel the error scrub if resilver started */
dsl_scan_cancel(scn->scn_dp);
return;
}
spa->spa_scrub_active = B_TRUE;
scn->scn_sync_start_time = gethrtime();
/*
* zfs_scan_suspend_progress can be set to disable scrub progress.
* See more detailed comment in dsl_scan_sync().
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
int mintime = zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
int i = 0;
zap_attribute_t *za;
zbookmark_phys_t *zb;
boolean_t limit_exceeded = B_FALSE;
za = zap_attribute_alloc();
zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
zap_cursor_advance(&scn->errorscrub_cursor)) {
name_to_bookmark(za->za_name, zb);
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
dsl_pool_config_enter(dp, FTAG);
read_by_block_level(scn, *zb);
dsl_pool_config_exit(dp, FTAG);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined += 1;
scn->errorscrub_phys.dep_to_examine -= 1;
i++;
if (i == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, zb)) {
limit_exceeded = B_TRUE;
break;
}
}
if (!limit_exceeded)
dsl_errorscrub_done(scn, B_TRUE, tx);
dsl_errorscrub_sync_state(scn, tx);
zap_attribute_free(za);
kmem_free(zb, sizeof (*zb));
return;
}
int error = 0;
for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
zap_cursor_advance(&scn->errorscrub_cursor)) {
zap_cursor_t *head_ds_cursor;
zap_attribute_t *head_ds_attr;
zbookmark_err_phys_t head_ds_block;
head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
head_ds_attr = zap_attribute_alloc();
uint64_t head_ds_err_obj = za->za_first_integer;
uint64_t head_ds;
name_to_object(za->za_name, &head_ds);
boolean_t config_held = B_FALSE;
uint64_t top_affected_fs;
for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {
name_to_errphys(head_ds_attr->za_name, &head_ds_block);
/*
* In case we are called from spa_sync the pool
* config is already held.
*/
if (!dsl_pool_config_held(dp)) {
dsl_pool_config_enter(dp, FTAG);
config_held = B_TRUE;
}
error = find_top_affected_fs(spa,
head_ds, &head_ds_block, &top_affected_fs);
if (error)
break;
error = scrub_filesystem(spa, top_affected_fs,
&head_ds_block, &i);
if (error == SET_ERROR(EFAULT)) {
limit_exceeded = B_TRUE;
break;
}
}
zap_cursor_fini(head_ds_cursor);
kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
zap_attribute_free(head_ds_attr);
if (config_held)
dsl_pool_config_exit(dp, FTAG);
}
zap_attribute_free(za);
kmem_free(zb, sizeof (*zb));
if (!limit_exceeded)
dsl_errorscrub_done(scn, B_TRUE, tx);
dsl_errorscrub_sync_state(scn, tx);
}
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
* can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
*/
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
int err = 0;
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
state_sync_type_t sync_type = SYNC_OPTIONAL;
int restart_early = 0;
if (spa->spa_resilver_deferred) {
uint64_t to_issue, issued;
if (!spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_RESILVER_DEFER))
spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
/*
* See print_scan_scrub_resilver_status() issued/total_i
* @ cmd/zpool/zpool_main.c
*/
to_issue =
scn->scn_phys.scn_to_examine - scn->scn_phys.scn_skipped;
issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
restart_early =
zfs_resilver_disable_defer ||
(issued < (to_issue * zfs_resilver_defer_percent / 100));
}
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
* imported (see dsl_scan_init). We also restart scans if there
* is a deferred resilver and the user has manually disabled
* deferred resilvers via zfs_resilver_disable_defer, or if the
* current scan progress is below zfs_resilver_defer_percent.
*/
if (dsl_scan_restarting(scn, tx) || restart_early) {
setup_sync_arg_t setup_sync_arg = {
.func = POOL_SCAN_SCRUB,
.txgstart = 0,
.txgend = 0,
};
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
setup_sync_arg.func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u on %s txg=%llu early=%d",
setup_sync_arg.func, dp->dp_spa->spa_name,
(longlong_t)tx->tx_txg, restart_early);
dsl_scan_setup_sync(&setup_sync_arg, tx);
}
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
/*
* If the scan is inactive due to a stalled async destroy, try again.
*/
if (!scn->scn_async_stalled && !dsl_scan_active(scn))
return;
/* reset scan statistics */
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
scn->scn_holes_this_txg = 0;
scn->scn_lt_min_this_txg = 0;
scn->scn_gt_max_this_txg = 0;
scn->scn_ddt_contained_this_txg = 0;
scn->scn_objsets_visited_this_txg = 0;
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_zios_this_txg = 0;
scn->scn_suspending = B_FALSE;
scn->scn_sync_start_time = gethrtime();
spa->spa_scrub_active = B_TRUE;
/*
* First process the async destroys. If we suspend, don't do
* any scrubbing or resilvering. This ensures that there are no
* async destroys while we are scanning, so the scan code doesn't
* have to worry about traversing it. It is also faster to free the
* blocks than to scrub them.
*/
err = dsl_process_async_destroys(dp, tx);
if (err != 0)
return;
if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
return;
/*
* Wait a few txgs after importing to begin scanning so that
* we can get the pool imported quickly.
*/
if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
return;
/*
* zfs_scan_suspend_progress can be set to disable scan progress.
* We don't want to spin the txg_sync thread, so we add a delay
* here to simulate the time spent doing a scan. This is mostly
* useful for testing and debugging.
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
uint_t mintime = (scn->scn_phys.scn_func ==
POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
/*
* Disabled by default, set zfs_scan_report_txgs to report
* average performance over the last zfs_scan_report_txgs TXGs.
*/
if (zfs_scan_report_txgs != 0 &&
tx->tx_txg % zfs_scan_report_txgs == 0) {
scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
spa_scan_stat_init(spa);
}
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
*/
if (!zfs_scan_legacy) {
scn->scn_is_sorted = B_TRUE;
if (scn->scn_last_checkpoint == 0)
scn->scn_last_checkpoint = ddi_get_lbolt();
}
/*
* For sorted scans, determine what kind of work we will be doing
* this txg based on our memory limitations and whether or not we
* need to perform a checkpoint.
*/
if (scn->scn_is_sorted) {
/*
* If we are over our checkpoint interval, set scn_clearing
* so that we can begin checkpointing immediately. The
* checkpoint allows us to save a consistent bookmark
* representing how much data we have scrubbed so far.
* Otherwise, use the memory limit to determine if we should
* scan for metadata or start issue scrub IOs. We accumulate
* metadata until we hit our hard memory limit at which point
* we issue scrub IOs until we are at our soft memory limit.
*/
if (scn->scn_checkpointing ||
ddi_get_lbolt() - scn->scn_last_checkpoint >
SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
if (!scn->scn_checkpointing)
zfs_dbgmsg("begin scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
} else {
boolean_t should_clear = dsl_scan_should_clear(scn);
if (should_clear && !scn->scn_clearing) {
zfs_dbgmsg("begin scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_TRUE;
} else if (!should_clear && scn->scn_clearing) {
zfs_dbgmsg("finish scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_FALSE;
}
}
} else {
ASSERT0(scn->scn_checkpointing);
ASSERT0(scn->scn_clearing);
}
if (!scn->scn_clearing && scn->scn_done_txg == 0) {
/* Need to scan metadata for more blocks to scrub */
dsl_scan_phys_t *scnp = &scn->scn_phys;
taskqid_t prefetch_tqid;
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
* Limits for the issuing phase are done per top-level vdev and
* are handled separately.
*/
scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"ddt bm=%llu/%llu/%llu/%llx",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
} else {
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"bm=%llu/%llu/%llu/%llu",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_bookmark.zb_objset,
(longlong_t)scnp->scn_bookmark.zb_object,
(longlong_t)scnp->scn_bookmark.zb_level,
(longlong_t)scnp->scn_bookmark.zb_blkid);
}
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scn->scn_prefetch_stop = B_FALSE;
prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
dsl_scan_prefetch_thread, scn, TQ_SLEEP);
ASSERT(prefetch_tqid != TASKQID_INVALID);
dsl_pool_config_enter(dp, FTAG);
dsl_scan_visit(scn, tx);
dsl_pool_config_exit(dp, FTAG);
mutex_enter(&dp->dp_spa->spa_scrub_lock);
scn->scn_prefetch_stop = B_TRUE;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&dp->dp_spa->spa_scrub_lock);
taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
"(%llu os's, %llu holes, %llu < mintxg, "
"%llu in ddt, %llu > maxtxg)",
(longlong_t)scn->scn_visited_this_txg,
spa->spa_name,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_objsets_visited_this_txg,
(longlong_t)scn->scn_holes_this_txg,
(longlong_t)scn->scn_lt_min_this_txg,
(longlong_t)scn->scn_ddt_contained_this_txg,
(longlong_t)scn->scn_gt_max_this_txg);
if (!scn->scn_suspending) {
ASSERT0(avl_numnodes(&scn->scn_queue));
scn->scn_done_txg = tx->tx_txg + 1;
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
scn->scn_issued_before_pass +=
spa->spa_scan_pass_issued;
spa_scan_stat_init(spa);
}
zfs_dbgmsg("scan complete for %s txg %llu",
spa->spa_name,
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
ASSERT(scn->scn_clearing);
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scan_io_queues_run(scn);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
/* calculate and dprintf the current memory usage */
(void) dsl_scan_should_clear(scn);
dsl_scan_update_stats(scn);
zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
"in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
(longlong_t)scn->scn_zios_this_txg,
spa->spa_name,
(longlong_t)scn->scn_segs_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_avg_zio_size_this_txg,
(longlong_t)scn->scn_avg_seg_size_this_txg);
} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
/* Finished with everything. Mark the scrub as complete */
zfs_dbgmsg("scan issuing complete txg %llu for %s",
(longlong_t)tx->tx_txg,
spa->spa_name);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
ASSERT0(scn->scn_queues_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
dsl_scan_sync_state(scn, tx, sync_type);
}
static void
count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
/*
* Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block.
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Update the spa's stats on how many bytes we have issued.
* Sequential scrubs create a zio for each DVA of the bp. Each
* of these will include all DVAs for repair purposes, but the
* zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs.
*/
atomic_add_64(&spa->spa_scan_pass_issued,
all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
static void
count_block_skipped(dsl_scan_t *scn, const blkptr_t *bp, boolean_t all)
{
if (BP_IS_EMBEDDED(bp))
return;
atomic_add_64(&scn->scn_phys.scn_skipped,
all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
if (zab == NULL)
return;
for (int i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_2_of_2_samevdev++;
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal == 1)
zb->zb_ditto_2_of_3_samevdev++;
else if (equal == 3)
zb->zb_ditto_3_of_3_samevdev++;
break;
}
}
}
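/*
 * Insert a scan_io_t into a queue's by-address AVL tree and the matching
 * extent range tree, discarding the new entry if that block is already
 * scheduled for reading.
 */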
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
atomic_add_64(&scn->scn_queues_pending, 1);
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
sio_free(sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio);
- range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
+ zfs_range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
SIO_GET_ASIZE(sio));
}
/*
* Given all the info we got from our metadata scanning process, we
* construct a scan_io_t and insert it into the scan sorting queue. The
* I/O must already be suitable for us to process. This is controlled
* by dsl_scan_enqueue().
*/
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
ASSERT0(BP_IS_GANG(bp));
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
bp2sio(bp, sio, dva_i);
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
queue->q_last_ext_addr = -1;
scan_io_queue_insert_impl(queue, sio);
}
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
* or immediately executes the I/O.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb)
{
spa_t *spa = dp->dp_spa;
ASSERT(!BP_IS_EMBEDDED(bp));
/*
* Gang blocks are hard to issue sequentially, so we just issue them
* here immediately instead of queuing them.
*/
if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
scan_exec_io(dp, bp, zio_flags, zb, NULL);
return;
}
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
dva_t dva;
vdev_t *vdev;
dva = bp->blk_dva[i];
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
ASSERT(vdev != NULL);
mutex_enter(&vdev->vdev_scan_io_queue_lock);
if (vdev->vdev_scan_io_queue == NULL)
vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
ASSERT(dp->dp_scan != NULL);
scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
i, zio_flags, zb);
mutex_exit(&vdev->vdev_scan_io_queue_lock);
}
}
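/*
 * Per-block callback for scrub and resilver scans: account for the block,
 * skip it if its birth txg falls outside the scan range, decide whether I/O
 * is needed (always for scrub; only for DVAs intersecting a dirty DTL for
 * resilver), and enqueue or skip it accordingly.
 */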
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
uint64_t phys_birth = BP_GET_BIRTH(bp);
size_t psize = BP_GET_PSIZE(bp);
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
count_block(dp->dp_blkstats, bp);
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) {
count_block_skipped(scn, bp, B_TRUE);
return (0);
}
/* Embedded BP's have phys_birth==0, so we reject them above. */
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
zio_flags |= ZIO_FLAG_SCRUB;
needs_io = B_TRUE;
} else {
ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
zio_flags |= ZIO_FLAG_RESILVER;
needs_io = B_FALSE;
}
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d];
/*
* Keep track of how much data we've examined so that
* zpool(8) status can make useful progress reports.
*/
uint64_t asize = DVA_GET_ASIZE(dva);
scn->scn_phys.scn_examined += asize;
spa->spa_scan_pass_exam += asize;
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
needs_io = dsl_scan_need_resilver(spa, dva, psize,
phys_birth);
}
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
count_block_skipped(scn, bp, B_TRUE);
}
/* do not relocate this block */
return (0);
}
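/*
 * Completion callback for scrub/resilver reads issued by scan_exec_io():
 * release the in-flight byte accounting (global or per-queue) and count
 * errors, other than checksum failures on speculative ZIL reads, against
 * the scrub or error-scrub error statistics.
 */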
static void
dsl_scan_scrub_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
dsl_scan_io_queue_t *queue = zio->io_private;
abd_free(zio->io_abd);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
} else {
mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&queue->q_zio_cv);
mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
}
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
!dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan
->errorscrub_phys.dep_errors);
} else {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
.scn_errors);
}
}
}
/*
* Given a scanning zio's information, executes the zio. The zio need
* not necessarily be sortable; this function simply executes the
* zio, no matter what it is. The optional queue argument allows the
* caller to specify that they want per top level vdev IO rate limiting
* instead of the legacy global limiting.
*/
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
zio_t *pio;
if (queue == NULL) {
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
pio = scn->scn_zio_root;
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
ASSERT3U(queue->q_maxinflight_bytes, >, 0);
mutex_enter(q_lock);
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
pio = queue->q_zio;
mutex_exit(q_lock);
}
ASSERT(pio != NULL);
count_block_issued(spa, bp, queue == NULL);
zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
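/*
 * Illustrative sketch (not part of this patch): the inflight-byte throttle
 * pattern used by scan_exec_io()/dsl_scan_scrub_done() above, restated as a
 * self-contained userland example with pthreads standing in for the kernel
 * kmutex_t/kcondvar_t primitives.  All names here (byte_throttle_t,
 * throttle_enter(), throttle_exit()) are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>

typedef struct byte_throttle {
    pthread_mutex_t bt_lock;
    pthread_cond_t  bt_cv;
    uint64_t        bt_inflight;   /* bytes currently issued */
    uint64_t        bt_max;        /* cap, e.g. scn_maxinflight_bytes */
} byte_throttle_t;

/* Called before issuing an I/O of `size` bytes; blocks while over the cap. */
static void
throttle_enter(byte_throttle_t *bt, uint64_t size)
{
    pthread_mutex_lock(&bt->bt_lock);
    while (bt->bt_inflight >= bt->bt_max)
        pthread_cond_wait(&bt->bt_cv, &bt->bt_lock);
    bt->bt_inflight += size;
    pthread_mutex_unlock(&bt->bt_lock);
}

/* Called from the I/O completion path; wakes any blocked issuers. */
static void
throttle_exit(byte_throttle_t *bt, uint64_t size)
{
    pthread_mutex_lock(&bt->bt_lock);
    bt->bt_inflight -= size;
    pthread_cond_broadcast(&bt->bt_cv);
    pthread_mutex_unlock(&bt->bt_lock);
}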
/*
* This is the primary extent sorting algorithm. We balance two parameters:
* 1) how many bytes of I/O are in an extent
* 2) how well the extent is filled with I/O (as a fraction of its total size)
* Since we allow extents to have gaps between their constituent I/Os, it's
* possible to have a fairly large extent that contains the same amount of
* I/O bytes as a much smaller extent, which just packs the I/O more tightly.
* The algorithm sorts based on a score calculated from the extent's size,
* the relative fill volume (in %) and a "fill weight" parameter that controls
* the split between whether we prefer larger extents or more well populated
* extents:
*
* SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
*
* Example:
* 1) assume extsz = 64 MiB
* 2) assume fill = 32 MiB (extent is half full)
* 3) assume fill_weight = 3
* 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
* SCORE = 32M + (50 * 3 * 32M) / 100
* SCORE = 32M + (4800M / 100)
* SCORE = 32M + 48M
* ^ ^
* | +--- final total relative fill-based score
* +--------- final total fill-based score
* SCORE = 80M
*
* As can be seen, at fill_weight=3, the algorithm is slightly biased towards
* extents that are more completely filled (in a 3:2 ratio) vs just larger.
* Note that as an optimization, we replace multiplication and division by
* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
*
* Since we do not care if one extent is only a few percent better than another,
* we compress the score into 6 bits via binary logarithm (highbit64()) and
* store it in the high bits of the offset, which are otherwise unused due to
* ashift. This reduces q_exts_by_size B-tree elements to only 64 bits, so they
* can be compared with a single operation. It also makes scrubs more sequential
* and reduces the chance that a minor extent change moves it within the B-tree.
*/
__attribute__((always_inline)) inline
static int
ext_size_compare(const void *x, const void *y)
{
const uint64_t *a = x, *b = y;
return (TREE_CMP(*a, *b));
}
ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
ext_size_compare)
static void
-ext_size_create(range_tree_t *rt, void *arg)
+ext_size_create(zfs_range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf,
sizeof (uint64_t));
}
static void
-ext_size_destroy(range_tree_t *rt, void *arg)
+ext_size_destroy(zfs_range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
ASSERT0(zfs_btree_numnodes(size_tree));
zfs_btree_destroy(size_tree);
}
static uint64_t
-ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
+ext_size_value(zfs_range_tree_t *rt, zfs_range_seg_gap_t *rsg)
{
(void) rt;
uint64_t size = rsg->rs_end - rsg->rs_start;
uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
fill_weight * rsg->rs_fill) >> 7);
ASSERT3U(rt->rt_shift, >=, 8);
return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
}
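/*
 * Illustrative sketch (not part of this patch): the extent score and its
 * 6-bit compression described in the block comment above, computed for the
 * worked example (64 MiB extent, 32 MiB fill, fill_weight = 3).  highbit64()
 * is reimplemented here so the snippet stands alone; it is not the kernel
 * version.
 */
#include <stdint.h>
#include <stdio.h>

static int
highbit64(uint64_t v)
{
    int h = 0;
    while (v != 0) {
        h++;
        v >>= 1;
    }
    return (h);
}

int
main(void)
{
    uint64_t size = 64ULL << 20;        /* extent size */
    uint64_t fill = 32ULL << 20;        /* bytes of queued I/O in it */
    uint64_t fill_weight = 3;
    uint64_t start = 0;                 /* extent start offset */

    /* Same math as ext_size_value(): percent replaced by <<7 / >>7. */
    uint64_t score = fill +
        ((((fill << 7) / size) * fill_weight * fill) >> 7);

    /* Compress to 6 bits and pack into the high (ashift-unused) bits. */
    uint64_t key = ((uint64_t)(64 - highbit64(score)) << 56) | start;

    /* Prints score=83886080 (80 MiB), matching the worked example. */
    printf("score=%llu key=%016llx\n",
        (unsigned long long)score, (unsigned long long)key);
    return (0);
}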
static void
-ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
+ext_size_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
- ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
- uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
+ ASSERT3U(rt->rt_type, ==, ZFS_RANGE_SEG_GAP);
+ uint64_t v = ext_size_value(rt, (zfs_range_seg_gap_t *)rs);
zfs_btree_add(size_tree, &v);
}
static void
-ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
+ext_size_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
- ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
- uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
+ ASSERT3U(rt->rt_type, ==, ZFS_RANGE_SEG_GAP);
+ uint64_t v = ext_size_value(rt, (zfs_range_seg_gap_t *)rs);
zfs_btree_remove(size_tree, &v);
}
static void
-ext_size_vacate(range_tree_t *rt, void *arg)
+ext_size_vacate(zfs_range_tree_t *rt, void *arg)
{
zfs_btree_t *size_tree = arg;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
ext_size_create(rt, arg);
}
-static const range_tree_ops_t ext_size_ops = {
+static const zfs_range_tree_ops_t ext_size_ops = {
.rtop_create = ext_size_create,
.rtop_destroy = ext_size_destroy,
.rtop_add = ext_size_add,
.rtop_remove = ext_size_remove,
.rtop_vacate = ext_size_vacate
};
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
*/
static int
sio_addr_compare(const void *x, const void *y)
{
const scan_io_t *a = x, *b = y;
return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
q->q_scn = scn;
q->q_vd = vd;
q->q_sio_memused = 0;
q->q_last_ext_addr = -1;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
- q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
- &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
+ q->q_exts_by_addr = zfs_range_tree_create_gap(&ext_size_ops,
+ ZFS_RANGE_SEG_GAP, &q->q_exts_by_size, 0, vd->vdev_ashift,
+ zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
return (q);
}
/*
* Destroys a scan queue and all segments and scan_io_t's contained in it.
* No further execution of I/O occurs; anything pending in the queue is
* simply freed without being executed.
*/
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (!avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
- ASSERT(range_tree_contains(queue->q_exts_by_addr,
+ ASSERT(zfs_range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio);
}
ASSERT0(queue->q_sio_memused);
- range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
- range_tree_destroy(queue->q_exts_by_addr);
+ zfs_range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
+ zfs_range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv);
kmem_free(queue, sizeof (*queue));
}
/*
* Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
* called on behalf of vdev_top_transfer when creating or destroying
* a mirror vdev due to zpool attach/detach.
*/
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
tvd->vdev_scan_io_queue->q_vd = tvd;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
mutex_enter(&tvd->vdev_scan_io_queue_lock);
if (tvd->vdev_scan_io_queue != NULL)
dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = NULL;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
vdev_t *vdev;
kmutex_t *q_lock;
dsl_scan_io_queue_t *queue;
scan_io_t *srch_sio, *sio;
avl_index_t idx;
uint64_t start, size;
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
ASSERT(vdev != NULL);
q_lock = &vdev->vdev_scan_io_queue_lock;
queue = vdev->vdev_scan_io_queue;
mutex_enter(q_lock);
if (queue == NULL) {
mutex_exit(q_lock);
return;
}
srch_sio = sio_alloc(BP_GET_NDVAS(bp));
bp2sio(bp, srch_sio, dva_i);
start = SIO_GET_OFFSET(srch_sio);
size = SIO_GET_ASIZE(srch_sio);
/*
* We can find the zio in two states:
* 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement
- * its data volume from the containing range_seg_t and
+ * its data volume from the containing zfs_range_seg_t and
* resort the q_exts_by_size tree to reflect that the
- * range_seg_t has lost some of its 'fill'. We don't shorten
- * the range_seg_t - this is usually rare enough not to be
+ * zfs_range_seg_t has lost some of its 'fill'. We don't shorten
+ * the zfs_range_seg_t - this is usually rare enough not to be
* worth the extra hassle of trying to keep track of precise
* extent boundaries.
* 2) Hot, where the zio is currently in-flight in
* dsl_scan_issue_ios. In this case, we can't simply
* reach in and stop the in-flight zio's, so we instead
* block the caller. Eventually, dsl_scan_issue_ios will
* be done with issuing the zio's it gathered and will
* signal us.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio != NULL) {
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
- ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
- range_tree_remove_fill(queue->q_exts_by_addr, start, size);
+ ASSERT(zfs_range_tree_contains(queue->q_exts_by_addr, start,
+ size));
+ zfs_range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/* count the block as though we skipped it */
sio2bp(sio, &tmpbp);
count_block_skipped(scn, &tmpbp, B_FALSE);
sio_free(sio);
}
mutex_exit(q_lock);
}
/*
* Callback invoked when a zio_free() zio is executing. This needs to be
* intercepted to prevent the zio from deallocating a particular portion
* of disk space that then gets reallocated and written to while we
* still have it queued up for processing.
*/
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(scn != NULL);
if (!dsl_scan_is_running(scn))
return;
for (int i = 0; i < BP_GET_NDVAS(bp); i++)
dsl_scan_freed_dva(spa, bp, i);
}
/*
* Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has
* not started, start it. Otherwise, only restart if max txg in DTL range is
* greater than the max txg in the current scan. If the DTL max is less than
* the scan max, then the vdev has not missed any new data since the resilver
* started, so a restart is not needed.
*/
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
uint64_t min, max;
if (!vdev_resilver_needed(vd, &min, &max))
return;
if (!dsl_scan_resilvering(dp)) {
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
return;
}
if (max <= dp->dp_scan->scn_phys.scn_max_txg)
return;
/* restart is needed, check if it can be deferred */
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
vdev_defer_resilver(vd);
else
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to scrub per txg");
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to obsolete per txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to free per txg");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
"Set to prevent scans from progressing");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
"Set to disable scrub I/O");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
"Max number of blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj");
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
"Enable block statistics calculation during scrub");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
"Tunable to report resilver performance over the last N txgs");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_defer_percent, UINT, ZMOD_RW,
"Issued IO percent complete after which resilvers are deferred");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
"Error blocks to be scrubbed in one txg");
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 7affbfac9dc7..35bd968f68ce 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -1,6283 +1,6306 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to each disk before
* moving on to the next top-level vdev.
*/
static uint64_t metaslab_aliquot = 1024 * 1024;
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
* Of blocks of size >= metaslab_force_ganging, actually gang them this often.
*/
uint_t metaslab_force_ganging_pct = 3;
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
uint_t zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
* blocks.
*/
static const int zfs_metaslab_condense_block_threshold = 4;
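/*
 * Illustrative sketch (not part of this patch): a rough restatement of the
 * condensing heuristic described in the two comments above.  This is not the
 * exact logic of metaslab_should_condense(); it only combines the two
 * tunables being explained: condense when the on-disk space map is both
 * (a) sufficiently larger than its optimal (condensed) size per
 * zfs_condense_pct, and (b) large enough, in blocks, that condensing can
 * actually save I/O.  The function name and parameters are hypothetical.
 */
#include <stdint.h>

static int
should_condense_sketch(uint64_t ondisk_bytes, uint64_t optimal_bytes,
    uint64_t sm_blksz, uint64_t condense_pct, uint64_t block_threshold)
{
    /* (a) on-disk form exceeds the optimal form by more than condense_pct. */
    int bloated = ondisk_bytes * 100 > optimal_bytes * condense_pct;

    /* (b) the uncondensed map spans more than block_threshold blocks. */
    int big_enough = ondisk_bytes > block_threshold * sm_blksz;

    return (bloated && big_enough);
}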
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
* a metaslab group's free space is less than or equal to the
* zfs_mg_noalloc_threshold the allocator will avoid allocating to that
* group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
* Once all groups in the pool reach zfs_mg_noalloc_threshold then all
* groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
static uint_t zfs_mg_noalloc_threshold = 0;
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or
* equal to zfs_mg_fragmentation_threshold. If a metaslab group
* exceeds this threshold then it will be skipped unless all metaslab
* groups within the metaslab class have also crossed this threshold.
*
* This tunable was introduced to avoid edge cases where we continue
* allocating from very fragmented disks in our pool while other, less
* fragmented disks exist. On the other hand, if all disks in the
* pool are uniformly approaching the threshold, the threshold can
* be a speed bump in performance, where we keep switching the disks
* that we allocate from (e.g. we allocate some segments from disk A,
* pushing it past the threshold, while freeing segments from disk B
* brings its fragmentation below the threshold).
*
* Empirically, we've seen that our vdev selection for allocations is
* good enough that fragmentation increases uniformly across all vdevs
* the majority of the time. Thus we set the threshold percentage high
* enough to avoid hitting the speed bump on pools that are being pushed
* to the edge.
*/
static uint_t zfs_mg_fragmentation_threshold = 95;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
* status allowing better metaslabs to be selected.
*/
-static uint_t zfs_metaslab_fragmentation_threshold = 70;
+static uint_t zfs_metaslab_fragmentation_threshold = 77;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = B_FALSE;
/*
* When set will prevent metaslabs from being unloaded.
*/
static int metaslab_debug_unload = B_FALSE;
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size, it switches to using a more
* aggressive strategy (i.e. search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
uint_t metaslab_df_free_pct = 4;
/*
* Maximum distance to search forward from the last offset. Without this
* limit, fragmented pools can see >100,000 iterations and
* metaslab_block_picker() becomes the performance limiting factor on
* high-performance storage.
*
* With the default setting of 16MB, we typically see less than 500
* iterations, even with very fragmented, ashift=9 pools. The maximum number
* of iterations possible is:
* metaslab_df_max_search / (2 * (1<<ashift))
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
* 2048 (with ashift=12).
*/
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
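/*
 * Illustrative sketch (not part of this patch): the iteration bound quoted
 * in the comment above, spelled out.  With the 16 MiB default this prints
 * 16384 for ashift=9 and 2048 for ashift=12.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t max_search = 16ULL * 1024 * 1024;  /* metaslab_df_max_search */

    for (int ashift = 9; ashift <= 12; ashift += 3) {
        uint64_t max_iters = max_search / (2 * (1ULL << ashift));
        printf("ashift=%d -> at most %llu iterations\n",
            ashift, (unsigned long long)max_iters);
    }
    return (0);
}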
/*
* Forces the metaslab_block_picker function to search for at least this many
* segments forwards until giving up on finding a segment that the allocation
* will fit into.
*/
static const uint32_t metaslab_min_search_count = 100;
/*
* If we are not searching forward (due to metaslab_df_max_search,
* metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
* controls what segment is used. If it is set, we will use the largest free
* segment. If it is not set, we will use a segment of exactly the requested
* size (or larger).
*/
static int metaslab_df_use_largest_segment = B_FALSE;
/*
* These tunables control how long a metaslab will remain loaded after the
* last allocation from it. A metaslab can't be unloaded until at least
* metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
* have elapsed. However, zfs_metaslab_mem_limit may cause it to be
* unloaded sooner. These settings are intended to be generous -- to keep
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
*/
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
/*
* Max number of metaslabs per group to preload.
*/
uint_t metaslab_preload_limit = 10;
/*
* Enable/disable preloading of metaslabs.
*/
static int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
static int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
static int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
static int metaslab_bias_enabled = B_TRUE;
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
* Enable/disable segment-based metaslab selection.
*/
static int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
static int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
static const boolean_t metaslab_trace_enabled = B_FALSE;
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
* The limit should be sufficiently large that we don't expect any allocation
* to ever exceed this value. In debug mode, the system will panic if this
* limit is ever reached allowing for further investigation.
*/
static const uint64_t metaslab_trace_max_entries = 5000;
/*
* Maximum number of metaslabs per group that can be disabled
* simultaneously.
*/
static const int max_disabled_ms = 3;
/*
* Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
* To avoid 64-bit overflow, don't set above UINT32_MAX.
*/
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
/*
* Maximum percentage of memory to use on storing loaded metaslabs. If loading
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
static uint_t zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
* segments. Used for debugging purposes.
*/
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
/*
* By default we only store segments over a certain size in the size-sorted
* metaslab trees (ms_allocatable_by_size and
* ms_unflushed_frees_by_size). This dramatically reduces memory usage and
* improves load and unload times at the cost of causing us to use slightly
* larger segments than we would otherwise in some cases.
*/
static const uint32_t metaslab_by_size_min_shift = 14;
/*
* If not set, we will first try normal allocation. If that fails then
* we will do a gang allocation. If that fails then we will do a "try hard"
* gang allocation. If that fails then we will have a multi-layer gang
* block.
*
* If set, we will first try normal allocation. If that fails then
* we will do a "try hard" allocation. If that fails we will do a gang
* allocation. If that fails we will do a "try hard" gang allocation. If
* that fails then we will have a multi-layer gang block.
*/
static int zfs_metaslab_try_hard_before_gang = B_FALSE;
/*
* When not trying hard, we only consider the best zfs_metaslab_find_max_tries
* metaslabs. This improves performance, especially when there are many
* metaslabs per vdev and the allocation can't actually be satisfied (so we
* would otherwise iterate all the metaslabs). If there is a metaslab with a
* worse weight but it can actually satisfy the allocation, we won't find it
* until trying hard. This may happen if the worse metaslab is not loaded
* (and the true weight is better than we have calculated), or due to weight
* bucketization. E.g. we are looking for a 60K segment, and the best
* metaslabs all have free segments in the 32-63K bucket, but the best
* zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
* bucket, and therefore a lower weight).
*/
static uint_t zfs_metaslab_find_max_tries = 100;
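/*
 * Illustrative sketch (not part of this patch): the weight-bucketization
 * effect described in the comment above, using plain power-of-two size
 * buckets as a stand-in for the real segment-based weight encoding (this is
 * only an approximation of it).  A 60K request and a 48K free segment land
 * in the same [32K, 64K) bucket, so the bucketized weight alone cannot tell
 * whether the metaslab can actually satisfy the allocation.
 */
#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit, 1-based; 0 for v == 0. */
static int
highbit64(uint64_t v)
{
    int h = 0;
    while (v != 0) {
        h++;
        v >>= 1;
    }
    return (h);
}

int
main(void)
{
    uint64_t request = 60 * 1024;
    uint64_t max_seg_a = 48 * 1024;   /* too small, but same bucket (16) */
    uint64_t max_seg_b = 72 * 1024;   /* big enough, higher bucket (17) */

    printf("request bucket=%d, seg_a bucket=%d, seg_b bucket=%d\n",
        highbit64(request), highbit64(max_seg_a), highbit64(max_seg_b));
    return (0);
}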
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
-static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
+static void metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
+ void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;
typedef struct metaslab_stats {
kstat_named_t metaslabstat_trace_over_limit;
kstat_named_t metaslabstat_reload_tree;
kstat_named_t metaslabstat_too_many_tries;
kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;
static metaslab_stats_t metaslab_stats = {
{ "trace_over_limit", KSTAT_DATA_UINT64 },
{ "reload_tree", KSTAT_DATA_UINT64 },
{ "too_many_tries", KSTAT_DATA_UINT64 },
{ "try_hard", KSTAT_DATA_UINT64 },
};
#define METASLABSTAT_BUMP(stat) \
atomic_inc_64(&metaslab_stats.stat.value.ui64);
static kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
"misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (metaslab_ksp != NULL) {
metaslab_ksp->ks_data = &metaslab_stats;
kstat_install(metaslab_ksp);
}
}
void
metaslab_stat_fini(void)
{
if (metaslab_ksp != NULL) {
kstat_delete(metaslab_ksp);
metaslab_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
mca->mca_rotor = NULL;
zfs_refcount_create_tracked(&mca->mca_alloc_slots);
}
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
ASSERT(mca->mca_rotor == NULL);
zfs_refcount_destroy(&mca->mca_alloc_slots);
}
mutex_destroy(&mc->mc_lock);
multilist_destroy(&mc->mc_metaslab_txg_list);
kmem_free(mc, offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
return (0);
}
static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
- mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
+ mc_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = vdev_get_mg(tvd, mc);
/*
* Skip any holes, uninitialized top-levels, or
* vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
- for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
+ for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
- for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
- kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
+ kmem_free(mc_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels,
* or vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN_TYPED(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift, uint64_t);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
multilist_t *ml = &mc->mc_metaslab_txg_list;
hrtime_t now = gethrtime();
for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
multilist_sublist_t *mls = multilist_sublist_lock_idx(ml, i);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL) {
mutex_enter(&msp->ms_lock);
/*
* If the metaslab has been removed from the list
* (which could happen if we were at the memory limit
* and it was evicted during this loop), then we can't
* proceed and we should restart the sublist.
*/
if (!multilist_link_active(&msp->ms_class_txg_node)) {
mutex_exit(&msp->ms_lock);
i--;
break;
}
mls = multilist_sublist_lock_idx(ml, i);
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
if (txg >
msp->ms_selected_txg + metaslab_unload_delay &&
now > msp->ms_selected_time +
MSEC2NSEC(metaslab_unload_delay_ms) &&
(msp->ms_allocator == -1 ||
!metaslab_preload_enabled)) {
metaslab_evict(msp, txg);
} else {
/*
* Once we've hit a metaslab selected too
* recently to evict, we're done evicting for
* now.
*/
mutex_exit(&msp->ms_lock);
break;
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
}
}
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int sort1 = 0;
int sort2 = 0;
if (m1->ms_allocator != -1 && m1->ms_primary)
sort1 = 1;
else if (m1->ms_allocator != -1 && !m1->ms_primary)
sort1 = 2;
if (m2->ms_allocator != -1 && m2->ms_primary)
sort2 = 1;
else if (m2->ms_allocator != -1 && !m2->ms_primary)
sort2 = 2;
/*
* Sort inactive metaslabs first, then primaries, then secondaries. When
* selecting a metaslab to allocate from, an allocator first tries its
* primary, then secondary active metaslab. If it doesn't have active
* metaslabs, or can't allocate from them, it searches for an inactive
* metaslab to activate. If it can't find a suitable one, it will steal
* a primary or secondary metaslab from another allocator.
*/
if (sort1 < sort2)
return (-1);
if (sort1 > sort2)
return (1);
int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (TREE_CMP(m1->ms_start, m2->ms_start));
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
* The allocatable flag is set to true if the capacity is below
* the zfs_mg_noalloc_threshold or has a fragmentation value that is
* greater than zfs_mg_fragmentation_threshold. If a metaslab group
* transitions from allocatable to non-allocatable or vice versa then the
* metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
* A metaslab group is considered allocatable if it has plenty
* of free space or is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
const metaslab_t *a = va;
const metaslab_t *b = vb;
int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
if (likely(cmp))
return (cmp);
uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
cmp = TREE_CMP(a_vdev_id, b_vdev_id);
if (cmp)
return (cmp);
return (TREE_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
metaslab_group_t *mg;
mg = kmem_zalloc(offsetof(metaslab_group_t,
mg_allocator[allocators]), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;
for (int i = 0; i < allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
}
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
mutex_destroy(&mg->mg_ms_disabled_lock);
cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
}
kmem_free(mg, offsetof(metaslab_group_t,
mg_allocator[mg->mg_allocators]));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1,
vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
mc->mc_allocator[i].mca_rotor = mg;
mg = mg->mg_next;
}
}
/*
* Passivate a metaslab group and remove it from the allocation rotor.
* Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
* a metaslab group. This function will momentarily drop spa_config_locks
* that are lower than the SCL_ALLOC lock (see comment below).
*/
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
(SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
/*
* The spa_config_lock is an array of rwlocks, ordered as
* follows (from highest to lowest):
* SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
* SCL_ZIO > SCL_FREE > SCL_VDEV
* (For more information about the spa_config_lock see spa_misc.c)
* The higher the lock, the broader its coverage. When we passivate
* a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
* config locks. However, the metaslab group's taskq might be trying
* to preload metaslabs so we must drop the SCL_ZIO lock and any
* lower locks to allow the I/O to complete. At a minimum,
* we continue to hold the SCL_ALLOC lock, which prevents any future
* allocations from taking place and any changes to the vdev tree.
*/
spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
metaslab_t *msp = mga->mga_primary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
msp = mga->mga_secondary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
}
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mgnext = NULL;
} else {
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
if (mc->mc_allocator[i].mca_rotor == mg)
mc->mc_allocator[i].mca_rotor = mgnext;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
/*
* Note that the number of nodes in mg_metaslab_tree may be one less
* than vdev_ms_count, due to the embedded log metaslab.
*/
mutex_enter(&mg->mg_lock);
uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
mutex_exit(&mg->mg_lock);
return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
avl_tree_t *t = &mg->mg_metaslab_tree;
uint64_t ashift = mg->mg_vd->vdev_ashift;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
- mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
+ mg_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
- ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
+ ASSERT3U(ZFS_RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
for (metaslab_t *msp = avl_first(t);
msp != NULL; msp = AVL_NEXT(t, msp)) {
VERIFY3P(msp->ms_group, ==, mg);
/* skip if not active */
if (msp->ms_sm == NULL)
continue;
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
}
- for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
+ for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
- kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
+ kmem_free(mg_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(MUTEX_HELD(&mg->mg_lock));
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
/*
- * Calculate the fragmentation for a given metaslab group. We can use
- * a simple average here since all metaslabs within the group must have
- * the same size. The return value will be a value between 0 and 100
+ * Calculate the fragmentation for a given metaslab group. Metaslabs are
+ * weighted by the amount of free space. The return value will be between 0 and 100
* (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
* group have a fragmentation metric.
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
- uint64_t valid_ms = 0;
+ uint64_t valid_ms = 0, total_ms = 0;
+ uint64_t free, total_free = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
- if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
- continue;
if (msp->ms_group != mg)
continue;
+ total_ms++;
+ if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
+ continue;
valid_ms++;
- fragmentation += msp->ms_fragmentation;
+ free = (msp->ms_size - metaslab_allocated_space(msp)) /
+ SPA_MINBLOCKSIZE; /* To prevent overflows. */
+ total_free += free;
+ fragmentation += msp->ms_fragmentation * free;
}
- if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
+ if (valid_ms < (total_ms + 1) / 2 || total_free == 0)
return (ZFS_FRAG_INVALID);
- fragmentation /= valid_ms;
+ fragmentation /= total_free;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
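/*
 * Illustrative sketch (not part of this patch): the free-space-weighted
 * average that the hunk above switches metaslab_group_fragmentation() to,
 * restated over plain arrays.  FRAG_INVALID stands in for ZFS_FRAG_INVALID
 * (metaslabs with no fragmentation metric); the names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAG_INVALID    UINT64_MAX

static uint64_t
group_fragmentation_sketch(const uint64_t *frag, const uint64_t *free_bytes,
    int nms)
{
    uint64_t weighted = 0, total_free = 0;
    int valid = 0;

    for (int i = 0; i < nms; i++) {
        if (frag[i] == FRAG_INVALID)
            continue;
        valid++;
        total_free += free_bytes[i];
        weighted += frag[i] * free_bytes[i];
    }
    /* Need a metric for at least half the metaslabs, and some free space. */
    if (valid < (nms + 1) / 2 || total_free == 0)
        return (FRAG_INVALID);
    return (weighted / total_free);
}

int
main(void)
{
    /* A nearly-full metaslab at 90% frag barely moves the group metric. */
    uint64_t frag[] = { 90, 10, 20 };
    uint64_t free_bytes[] = { 1 << 20, 100 << 20, 100 << 20 };

    printf("group fragmentation = %llu%%\n", (unsigned long long)
        group_fragmentation_sketch(frag, free_bytes, 3));
    return (0);
}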
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
* zfs_mg_fragmentation_threshold and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
int flags, uint64_t psize, int allocator, int d)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
* We can only consider skipping this metaslab group if it's
* in the normal metaslab class and there are other metaslab
* groups to select from. Otherwise, we always consider it eligible
* for allocations.
*/
if ((mc != spa_normal_class(spa) &&
mc != spa_special_class(spa) &&
mc != spa_dedup_class(spa)) ||
mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mga_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
int64_t qdepth;
uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
/*
* Some allocations (e.g., those coming from device removal
* where the allocations are not even counted in the
* metaslab allocation queues) are allowed to bypass
* the throttle.
*/
if (flags & METASLAB_DONT_THROTTLE)
return (B_TRUE);
/*
* Relax allocation throttling for ditto blocks. Due to
* random imbalances in allocation, copies tend to pile up on
* whichever vdev looks slightly better at the moment.
*/
qmax = qmax * (4 + d) / 4;
qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
* the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (metaslab_group_t *mgp = mg->mg_next;
mgp != rotor; mgp = mgp->mg_next) {
metaslab_group_allocator_t *mgap =
&mgp->mg_allocator[allocator];
qmax = mgap->mga_cur_max_alloc_queue_depth;
qmax = qmax * (4 + d) / 4;
qdepth =
zfs_refcount_count(&mgap->mga_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
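/*
 * Editorial sketch (not part of the ZFS sources): the ditto-block relaxation
 * applied above.  qmax is scaled by (4 + d) / 4, so the first copy (d == 0)
 * keeps the configured queue depth and each additional copy gets 25% more
 * headroom.  The base queue depth used here is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint64_t base_qmax = 1000;

        for (int d = 0; d < 3; d++) {
                uint64_t qmax = base_qmax * (4 + d) / 4;
                /* prints 1000, 1250, 1500 */
                printf("copy %d -> qmax %llu\n", d, (unsigned long long)qmax);
        }
        return (0);
}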
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree using 32-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
- const range_seg32_t *r1 = x1;
- const range_seg32_t *r2 = x2;
+ const zfs_range_seg32_t *r1 = x1;
+ const zfs_range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
/*
* Comparison function for the private size-ordered tree using 64-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
__attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
- const range_seg64_t *r1 = x1;
- const range_seg64_t *r2 = x2;
+ const zfs_range_seg64_t *r1 = x1;
+ const zfs_range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
typedef struct metaslab_rt_arg {
zfs_btree_t *mra_bt;
uint32_t mra_floor_shift;
} metaslab_rt_arg_t;
struct mssa_arg {
- range_tree_t *rt;
+ zfs_range_tree_t *rt;
metaslab_rt_arg_t *mra;
};
static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
struct mssa_arg *mssap = arg;
- range_tree_t *rt = mssap->rt;
+ zfs_range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
- range_seg_max_t seg = {0};
- rs_set_start(&seg, rt, start);
- rs_set_end(&seg, rt, start + size);
+ zfs_range_seg_max_t seg = {0};
+ zfs_rs_set_start(&seg, rt, start);
+ zfs_rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
}
static void
-metaslab_size_tree_full_load(range_tree_t *rt)
+metaslab_size_tree_full_load(zfs_range_tree_t *rt)
{
metaslab_rt_arg_t *mrap = rt->rt_arg;
METASLABSTAT_BUMP(metaslabstat_reload_tree);
ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
mrap->mra_floor_shift = 0;
struct mssa_arg arg = {0};
arg.rt = rt;
arg.mra = mrap;
- range_tree_walk(rt, metaslab_size_sorted_add, &arg);
+ zfs_range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
- range_seg32_t, metaslab_rangesize32_compare)
+ zfs_range_seg32_t, metaslab_rangesize32_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
- range_seg64_t, metaslab_rangesize64_compare)
+ zfs_range_seg64_t, metaslab_rangesize64_compare)
/*
* Create any block allocator specific components. The current allocators
- * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
+ * rely on using both a size-ordered zfs_range_tree_t and an array of
+ * uint64_t's.
*/
static void
-metaslab_rt_create(range_tree_t *rt, void *arg)
+metaslab_rt_create(zfs_range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
size_t size;
int (*compare) (const void *, const void *);
bt_find_in_buf_f bt_find;
switch (rt->rt_type) {
- case RANGE_SEG32:
- size = sizeof (range_seg32_t);
+ case ZFS_RANGE_SEG32:
+ size = sizeof (zfs_range_seg32_t);
compare = metaslab_rangesize32_compare;
bt_find = metaslab_rt_find_rangesize32_in_buf;
break;
- case RANGE_SEG64:
- size = sizeof (range_seg64_t);
+ case ZFS_RANGE_SEG64:
+ size = sizeof (zfs_range_seg64_t);
compare = metaslab_rangesize64_compare;
bt_find = metaslab_rt_find_rangesize64_in_buf;
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
zfs_btree_create(size_tree, compare, bt_find, size);
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
static void
-metaslab_rt_destroy(range_tree_t *rt, void *arg)
+metaslab_rt_destroy(zfs_range_tree_t *rt, void *arg)
{
(void) rt;
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_destroy(size_tree);
kmem_free(mrap, sizeof (*mrap));
}
static void
-metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
+metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
- if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
+ if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) <
(1ULL << mrap->mra_floor_shift))
return;
zfs_btree_add(size_tree, rs);
}
static void
-metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
+metaslab_rt_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
- if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
+ if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) < (1ULL <<
mrap->mra_floor_shift))
return;
zfs_btree_remove(size_tree, rs);
}
static void
-metaslab_rt_vacate(range_tree_t *rt, void *arg)
+metaslab_rt_vacate(zfs_range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
metaslab_rt_create(rt, arg);
}
-static const range_tree_ops_t metaslab_rt_ops = {
+static const zfs_range_tree_ops_t metaslab_rt_ops = {
.rtop_create = metaslab_rt_create,
.rtop_destroy = metaslab_rt_destroy,
.rtop_add = metaslab_rt_add,
.rtop_remove = metaslab_rt_remove,
.rtop_vacate = metaslab_rt_vacate
};
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
zfs_btree_t *t = &msp->ms_allocatable_by_size;
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
if (t == NULL)
return (0);
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL)
return (0);
- return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
+ return (zfs_rs_get_end(rs, msp->ms_allocatable) - zfs_rs_get_start(rs,
msp->ms_allocatable));
}
/*
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_unflushed_frees == NULL)
return (0);
if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_unflushed_frees);
- range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
+ zfs_range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
NULL);
if (rs == NULL)
return (0);
/*
* When a range is freed from the metaslab, that range is added to
* both the unflushed frees and the deferred frees. While the block
* will eventually be usable, if the metaslab were loaded the range
* would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
* txgs had passed. As a result, when attempting to estimate an upper
* bound for the largest currently-usable free segment in the
* metaslab, we need to not consider any ranges currently in the defer
* trees. This algorithm approximates the largest available chunk in
* the largest range in the unflushed_frees tree by taking the first
* chunk. While this may be a poor estimate, it should only remain so
* briefly and should eventually self-correct as frees are no longer
* deferred. Similar logic applies to the ms_freed tree. See
* metaslab_load() for more details.
*
* There are two primary sources of inaccuracy in this estimate. Both
* are tolerated for performance reasons. The first source is that we
* only check the largest segment for overlaps. Smaller segments may
* have more favorable overlaps with the other trees, resulting in
* larger usable chunks. Second, we only look at the first chunk in
* the largest segment; there may be other usable chunks in the
* largest segment, but we ignore them.
*/
- uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
- uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
+ uint64_t rstart = zfs_rs_get_start(rs, msp->ms_unflushed_frees);
+ uint64_t rsize = zfs_rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
uint64_t start = 0;
uint64_t size = 0;
- boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
- rsize, &start, &size);
+ boolean_t found = zfs_range_tree_find_in(msp->ms_defer[t],
+ rstart, rsize, &start, &size);
if (found) {
if (rstart == start)
return (0);
rsize = start - rstart;
}
}
uint64_t start = 0;
uint64_t size = 0;
- boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
+ boolean_t found = zfs_range_tree_find_in(msp->ms_freed, rstart,
rsize, &start, &size);
if (found)
rsize = start - rstart;
return (rsize);
}
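/*
 * Editorial sketch (not part of the ZFS sources): the clipping step performed
 * above.  Given the largest unflushed-free segment and the first overlapping
 * range found in another tree (defer or freed), the usable estimate is the
 * gap before that overlap; if the overlap begins at the segment start, the
 * estimate drops to zero.  clip_estimate() and the offsets are invented for
 * illustration only.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t
clip_estimate(uint64_t rstart, uint64_t rsize, uint64_t overlap_start,
    int have_overlap)
{
        if (!have_overlap)
                return (rsize);
        if (overlap_start == rstart)
                return (0);             /* the first chunk is not usable yet */
        return (overlap_start - rstart);
}

int
main(void)
{
        /* 1 MiB candidate at 4 MiB; a deferred free starts 256 KiB into it. */
        uint64_t est = clip_estimate(4ULL << 20, 1ULL << 20,
            (4ULL << 20) + (256ULL << 10), 1);
        printf("usable estimate: %llu bytes\n", (unsigned long long)est);
        return (0);
}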
-static range_seg_t *
-metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
+static zfs_range_seg_t *
+metaslab_block_find(zfs_btree_t *t, zfs_range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
- range_seg_t *rs;
- range_seg_max_t rsearch;
+ zfs_range_seg_t *rs;
+ zfs_range_seg_max_t rsearch;
- rs_set_start(&rsearch, rt, start);
- rs_set_end(&rsearch, rt, start + size);
+ zfs_rs_set_start(&rsearch, rt, start);
+ zfs_rs_set_end(&rsearch, rt, start + size);
rs = zfs_btree_find(t, &rsearch, where);
if (rs == NULL) {
rs = zfs_btree_next(t, where, where);
}
return (rs);
}
/*
* This is a helper function that can be used by the allocator to find a
* suitable block to allocate. This will search the specified B-tree looking
* for a block that matches the specified criteria.
*/
static uint64_t
-metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
+metaslab_block_picker(zfs_range_tree_t *rt, uint64_t *cursor, uint64_t size,
uint64_t max_search)
{
if (*cursor == 0)
*cursor = rt->rt_start;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
- range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
+ zfs_range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size,
+ &where);
uint64_t first_found;
int count_searched = 0;
if (rs != NULL)
- first_found = rs_get_start(rs, rt);
+ first_found = zfs_rs_get_start(rs, rt);
- while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
+ while (rs != NULL && (zfs_rs_get_start(rs, rt) - first_found <=
max_search || count_searched < metaslab_min_search_count)) {
- uint64_t offset = rs_get_start(rs, rt);
- if (offset + size <= rs_get_end(rs, rt)) {
+ uint64_t offset = zfs_rs_get_start(rs, rt);
+ if (offset + size <= zfs_rs_get_end(rs, rt)) {
*cursor = offset + size;
return (offset);
}
rs = zfs_btree_next(bt, &where, &where);
count_searched++;
}
*cursor = 0;
return (-1ULL);
}
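/*
 * Editorial sketch (not part of the ZFS sources): the first-fit-from-cursor
 * idea behind metaslab_block_picker(), reduced to a plain array of
 * (start, end) free segments.  The real code walks an offset-sorted B-tree,
 * bounds the walk with max_search, and removes allocated space from the
 * tree, so the cursor naturally lands at the start of the shrunken segment;
 * none of that is modeled here.  pick_block() and the segment list are
 * invented.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t start, end; } seg_t;

static uint64_t
pick_block(const seg_t *segs, int nsegs, uint64_t *cursor, uint64_t size)
{
        for (int i = 0; i < nsegs; i++) {
                if (segs[i].start < *cursor)
                        continue;       /* behind the cursor */
                if (segs[i].start + size <= segs[i].end) {
                        *cursor = segs[i].start + size;
                        return (segs[i].start);
                }
        }
        *cursor = 0;                    /* give up; wrap for the next caller */
        return (-1ULL);
}

int
main(void)
{
        seg_t segs[] = { { 0, 4096 }, { 8192, 16384 } };
        uint64_t cursor = 0;

        /* prints 0, then 8192 */
        printf("%llu\n", (unsigned long long)pick_block(segs, 2, &cursor, 4096));
        printf("%llu\n", (unsigned long long)pick_block(segs, 2, &cursor, 4096));
        return (0);
}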
static uint64_t metaslab_df_alloc(metaslab_t *msp, uint64_t size);
static uint64_t metaslab_cf_alloc(metaslab_t *msp, uint64_t size);
static uint64_t metaslab_ndf_alloc(metaslab_t *msp, uint64_t size);
metaslab_ops_t *metaslab_allocator(spa_t *spa);
static metaslab_ops_t metaslab_allocators[] = {
{ "dynamic", metaslab_df_alloc },
{ "cursor", metaslab_cf_alloc },
{ "new-dynamic", metaslab_ndf_alloc },
};
static int
spa_find_allocator_byname(const char *val)
{
int a = ARRAY_SIZE(metaslab_allocators) - 1;
if (strcmp("new-dynamic", val) == 0)
return (-1); /* remove when ndf is working */
for (; a >= 0; a--) {
if (strcmp(val, metaslab_allocators[a].msop_name) == 0)
return (a);
}
return (-1);
}
void
spa_set_allocator(spa_t *spa, const char *allocator)
{
int a = spa_find_allocator_byname(allocator);
if (a < 0) a = 0;
spa->spa_active_allocator = a;
zfs_dbgmsg("spa allocator: %s", metaslab_allocators[a].msop_name);
}
int
spa_get_allocator(spa_t *spa)
{
return (spa->spa_active_allocator);
}
#if defined(_KERNEL)
int
param_set_active_allocator_common(const char *val)
{
char *p;
if (val == NULL)
return (SET_ERROR(EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
int a = spa_find_allocator_byname(val);
if (a < 0)
return (SET_ERROR(EINVAL));
zfs_active_allocator = metaslab_allocators[a].msop_name;
return (0);
}
#endif
metaslab_ops_t *
metaslab_allocator(spa_t *spa)
{
int allocator = spa_get_allocator(spa);
return (&metaslab_allocators[allocator]);
}
/*
* ==========================================================================
* Dynamic Fit (df) block allocator
*
* Search for a free chunk of at least this size, starting from the last
* offset (for this alignment of block) looking for up to
* metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
* found within 16MB, then return a free chunk of exactly the requested size (or
* larger).
*
* If it seems like searching from the last offset will be unproductive, skip
* that and just return a free chunk of exactly the requested size (or larger).
* This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
* mechanism is probably not very useful and may be removed in the future.
*
* The behavior when not searching can be changed to return the largest free
* chunk, instead of a free chunk of exactly the requested size, by setting
* metaslab_df_use_largest_segment.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
* bucket), but it does not guarantee that allocations of other
* sizes will not end up in the same region.
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
- range_tree_t *rt = msp->ms_allocatable;
- uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
+ zfs_range_tree_t *rt = msp->ms_allocatable;
+ uint_t free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;
uint64_t offset;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're running low on space, find a segment based on size,
* rather than iterating based on offset.
*/
if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
offset = -1;
} else {
offset = metaslab_block_picker(rt,
cursor, size, metaslab_df_max_search);
}
if (offset == -1) {
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
if (metaslab_df_use_largest_segment) {
/* use largest free segment */
rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
} else {
zfs_btree_index_t where;
/* use segment of this size, or next largest */
rs = metaslab_block_find(&msp->ms_allocatable_by_size,
rt, msp->ms_start, size, &where);
}
- if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
- rt)) {
- offset = rs_get_start(rs, rt);
+ if (rs != NULL && zfs_rs_get_start(rs, rt) + size <=
+ zfs_rs_get_end(rs, rt)) {
+ offset = zfs_rs_get_start(rs, rt);
*cursor = offset + size;
}
}
return (offset);
}
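/*
 * Editorial sketch (not part of the ZFS sources): the cursor-selection trick
 * used above.  size & -size isolates the lowest set bit of the requested
 * size, i.e. the largest power of two that evenly divides it, and the bit
 * position of that value picks one cursor per alignment class.  The local
 * highbit64() below mimics the kernel helper; the sizes are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

static int
highbit64(uint64_t v)   /* 1-based index of the highest set bit */
{
        int h = 0;

        while (v != 0) {
                h++;
                v >>= 1;
        }
        return (h);
}

int
main(void)
{
        uint64_t sizes[] = { 512, 4096, 12288, 131072 };

        for (int i = 0; i < 4; i++) {
                uint64_t align = sizes[i] & -sizes[i];
                /* buckets printed: 9, 12, 12, 17 */
                printf("size %7llu -> align %7llu -> cursor bucket %d\n",
                    (unsigned long long)sizes[i],
                    (unsigned long long)align, highbit64(align) - 1);
        }
        return (0);
}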
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
- range_tree_t *rt = msp->ms_allocatable;
+ zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
- if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
- size)
+ if (rs == NULL || (zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt)) < size)
return (-1ULL);
- *cursor = rs_get_start(rs, rt);
- *cursor_end = rs_get_end(rs, rt);
+ *cursor = zfs_rs_get_start(rs, rt);
+ *cursor_end = zfs_rs_get_end(rs, rt);
}
offset = *cursor;
*cursor += size;
return (offset);
}
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
zfs_btree_t *t = &msp->ms_allocatable->rt_root;
- range_tree_t *rt = msp->ms_allocatable;
+ zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
- range_seg_t *rs;
- range_seg_max_t rsearch;
+ zfs_range_seg_t *rs;
+ zfs_range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (max_size < size)
return (-1ULL);
- rs_set_start(&rsearch, rt, *cursor);
- rs_set_end(&rsearch, rt, *cursor + size);
+ zfs_rs_set_start(&rsearch, rt, *cursor);
+ zfs_rs_set_end(&rsearch, rt, *cursor + size);
rs = zfs_btree_find(t, &rsearch, &where);
- if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
+ if (rs == NULL || (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) <
+ size) {
t = &msp->ms_allocatable_by_size;
- rs_set_start(&rsearch, rt, 0);
- rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
+ zfs_rs_set_start(&rsearch, rt, 0);
+ zfs_rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
metaslab_ndf_clump_shift)));
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL)
rs = zfs_btree_next(t, &where, &where);
ASSERT(rs != NULL);
}
- if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
- *cursor = rs_get_start(rs, rt) + size;
- return (rs_get_start(rs, rt));
+ if ((zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) >= size) {
+ *cursor = zfs_rs_get_start(rs, rt) + size;
+ return (zfs_rs_get_start(rs, rt));
}
return (-1ULL);
}
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
/*
* Wait for any in-progress flushing to complete.
*/
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_flushing)
cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
}
static unsigned int
metaslab_idx_func(multilist_t *ml, void *arg)
{
metaslab_t *msp = arg;
/*
* ms_id values are allocated sequentially, so a full 64-bit
* division would be a waste of time; limit it to 32 bits.
*/
return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
}
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
return (msp->ms_allocated_space);
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocating = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_condensing);
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an
* allocated space map. Calling this in non-syncing context
* does not provide a consistent view of the metaslab since
* we're performing allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
/*
* Even though the smp_alloc field can get negative,
* when it comes to a metaslab's space map, that should
* never be the case.
*/
ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
ASSERT3U(space_map_allocated(msp->ms_sm), >=,
- range_tree_space(msp->ms_unflushed_frees));
+ zfs_range_tree_space(msp->ms_unflushed_frees));
ASSERT3U(metaslab_allocated_space(msp), ==,
space_map_allocated(msp->ms_sm) +
- range_tree_space(msp->ms_unflushed_allocs) -
- range_tree_space(msp->ms_unflushed_frees));
+ zfs_range_tree_space(msp->ms_unflushed_allocs) -
+ zfs_range_tree_space(msp->ms_unflushed_frees));
sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
/*
* Account for future allocations since we would have
* already deducted that space from the ms_allocatable.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocating +=
- range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
+ zfs_range_tree_space(msp->ms_allocating[(txg + t) &
+ TXG_MASK]);
}
ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
msp->ms_allocating_total);
ASSERT3U(msp->ms_deferspace, ==,
- range_tree_space(msp->ms_defer[0]) +
- range_tree_space(msp->ms_defer[1]));
+ zfs_range_tree_space(msp->ms_defer[0]) +
+ zfs_range_tree_space(msp->ms_defer[1]));
- msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
- msp->ms_deferspace + range_tree_space(msp->ms_freed);
+ msp_free_space = zfs_range_tree_space(msp->ms_allocatable) +
+ allocating + msp->ms_deferspace +
+ zfs_range_tree_space(msp->ms_freed);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
/*
* Auxiliary histograms are only cleared when resetting them,
* which can only happen while the metaslab is loaded.
*/
ASSERT(msp->ms_loaded);
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
}
static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
- range_tree_t *rt)
+ zfs_range_tree_t *rt)
{
/*
* This is modeled after space_map_histogram_add(), so refer to that
* function for implementation details. We want this to work like
* the space map histogram, and not the range tree histogram, as we
* are essentially constructing a delta that will be later subtracted
* from the space map histogram.
*/
int idx = 0;
- for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ for (int i = shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
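/*
 * Editorial sketch (not part of the ZFS sources): the loop above appears
 * equivalent to folding the range-tree histogram (segment counts per
 * power-of-two size class) into the smaller space-map histogram, whose
 * bucket 0 corresponds to 2^shift, with everything past the last bucket
 * scaled up so larger segments count proportionally more.  The bucket
 * counts and constants below are stand-ins, not the real ZFS values.
 */
#include <stdio.h>
#include <stdint.h>

#define RT_BUCKETS      64      /* stand-in for ZFS_RANGE_TREE_HISTOGRAM_SIZE */
#define SM_BUCKETS      32      /* stand-in for SPACE_MAP_HISTOGRAM_SIZE */

int
main(void)
{
        uint64_t rt_hist[RT_BUCKETS] = { 0 }, sm_hist[SM_BUCKETS] = { 0 };
        uint64_t shift = 9;     /* space-map bucket 0 == 2^9-byte segments */

        rt_hist[9] = 4;         /* four 512-byte segments */
        rt_hist[42] = 1;        /* one huge segment, beyond the last bucket */

        for (uint64_t i = shift; i < RT_BUCKETS; i++) {
                uint64_t idx = i - shift;

                if (idx > SM_BUCKETS - 1)
                        idx = SM_BUCKETS - 1;
                sm_hist[idx] += rt_hist[i] << (i - idx - shift);
        }
        /* prints 4 and 4: the huge segment counts as four last-bucket units */
        printf("sm_hist[0]  = %llu\n", (unsigned long long)sm_hist[0]);
        printf("sm_hist[%d] = %llu\n", SM_BUCKETS - 1,
            (unsigned long long)sm_hist[SM_BUCKETS - 1]);
        return (0);
}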
/*
* Called at every sync pass that the metaslab gets synced.
*
* The reason is that we want our auxiliary histograms to be updated
* wherever the metaslab's space map histogram is updated. This way
* we stay consistent on which parts of the metaslab space map's
* histogram are currently not available for allocations (e.g because
* they are in the defer, freed, and freeing trees).
*/
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(sm != NULL);
/*
* This is similar to the metaslab's space map histogram updates
* that take place in metaslab_sync(). The only difference is that
* we only care about segments that haven't made it into the
* ms_allocatable tree yet.
*/
if (msp->ms_loaded) {
metaslab_aux_histograms_clear(msp);
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freed);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
metaslab_aux_histogram_add(msp->ms_deferhist[t],
sm->sm_shift, msp->ms_defer[t]);
}
}
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freeing);
}
/*
* Called every time we are done syncing (writing to) the metaslab,
* i.e. at the end of each sync pass.
* [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
*/
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
if (sm == NULL) {
/*
* We came here from metaslab_init() when creating/opening a
* pool, looking at a metaslab that hasn't had any allocations
* yet.
*/
return;
}
/*
* This is similar to the actions that we take for the ms_freed
* and ms_defer trees in metaslab_sync_done().
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
if (defer_allowed) {
memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
sizeof (msp->ms_synchist));
} else {
memset(msp->ms_deferhist[hist_index], 0,
sizeof (msp->ms_deferhist[hist_index]));
}
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
}
/*
* Ensure that the metaslab's weight and fragmentation are consistent
* with the contents of the histogram (either the range tree's histogram
* or the space map's depending whether the metaslab is loaded).
*/
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can end up here from vdev_remove_complete(), in which case we
* cannot do these assertions because we hold spa config locks and
* thus we are not allowed to read from the DMU.
*
* We check if the metaslab group has been removed and if that's
* the case we return immediately as that would mean that we are
* here from the aforementioned code path.
*/
if (msp->ms_group == NULL)
return;
/*
* Devices being removed always return a weight of 0 and leave
* fragmentation and ms_max_size as is - there is nothing for
* us to verify here.
*/
vdev_t *vd = msp->ms_group->mg_vd;
if (vd->vdev_removing)
return;
/*
* If the metaslab is dirty it probably means that we've done
* some allocations or frees that have changed our histograms
* and thus the weight.
*/
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&vd->vdev_ms_list, msp, t))
return;
}
/*
* This verification checks that our in-memory state is consistent
* with what's on disk. If the pool is read-only then there aren't
* any changes and we just have the initially-loaded state.
*/
if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
return;
/* Some extra verification of the in-core tree, when possible. */
if (msp->ms_loaded) {
- range_tree_stat_verify(msp->ms_allocatable);
+ zfs_range_tree_stat_verify(msp->ms_allocatable);
VERIFY(space_map_histogram_verify(msp->ms_sm,
msp->ms_allocatable));
}
uint64_t weight = msp->ms_weight;
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
uint64_t frag = msp->ms_fragmentation;
uint64_t max_segsize = msp->ms_max_size;
msp->ms_weight = 0;
msp->ms_fragmentation = 0;
/*
* This function is used for verification purposes and thus should
* not introduce any side-effects/mutations on the system's state.
*
* Regardless of whether metaslab_weight() thinks this metaslab
* should be active or not, we want to ensure that the actual weight
* (and therefore the value of ms_weight) would be the same if it
* was to be recalculated at this point.
*
* In addition we set the nodirty flag so metaslab_weight() does
* not dirty the metaslab for future TXGs (e.g. when trying to
* force condensing to upgrade the metaslab spacemaps).
*/
msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
VERIFY3U(max_segsize, ==, msp->ms_max_size);
/*
* If the weight type changed then there is no point in doing
* verification. Revert fields to their original values.
*/
if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
(!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
msp->ms_fragmentation = frag;
msp->ms_weight = weight;
return;
}
VERIFY3U(msp->ms_fragmentation, ==, frag);
VERIFY3U(msp->ms_weight, ==, weight);
}
/*
* If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
* this class that was used longest ago, and attempt to unload it. We don't
* want to spend too much time in this loop to prevent performance
* degradation, and we expect that most of the time this operation will
* succeed. Between that and the normal unloading processing during txg sync,
* we expect this to keep the metaslab memory usage under control.
*/
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
uint64_t allmem = arc_all_memory();
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
uint_t tries = 0;
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
tries++) {
unsigned int idx = multilist_get_random_index(
&mc->mc_metaslab_txg_list);
multilist_sublist_t *mls =
multilist_sublist_lock_idx(&mc->mc_metaslab_txg_list, idx);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
inuse * size) {
VERIFY3P(mls, ==, multilist_sublist_lock_idx(
&mc->mc_metaslab_txg_list, idx));
ASSERT3U(idx, ==,
metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
if (!multilist_link_active(&msp->ms_class_txg_node)) {
multilist_sublist_unlock(mls);
break;
}
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
/*
* If the metaslab is currently loading there are two
* cases. If it's the metaslab we're evicting, we
* can't continue on or we'll panic when we attempt to
* recursively lock the mutex. If it's another
* metaslab that's loading, it can be safely skipped,
* since we know it's very new and therefore not a
* good eviction candidate. We check later once the
* lock is held that the metaslab is fully loaded
* before actually unloading it.
*/
if (msp->ms_loading) {
msp = next_msp;
inuse =
spl_kmem_cache_inuse(zfs_btree_leaf_cache);
continue;
}
/*
* We can't unload metaslabs with no spacemap because
* they're not ready to be unloaded yet. We can't
* unload metaslabs with outstanding allocations
* because doing so could cause the metaslab's weight
* to decrease while it's unloaded, which violates an
* invariant that we use to prevent unnecessary
* loading. We also don't unload metaslabs that are
* currently active because they are high-weight
* metaslabs that are likely to be used in the near
* future.
*/
mutex_enter(&msp->ms_lock);
if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
msp->ms_allocating_total == 0) {
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
#else
(void) mc, (void) zfs_metaslab_mem_limit;
#endif
}
static int
metaslab_load_impl(metaslab_t *msp)
{
int error = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We temporarily drop the lock to unblock other operations while we
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
* If we are using the log space maps, metaslab_sync() can't write to
* the metaslab's space map while we are loading as we only write to
* it when we are flushing the metaslab, and that can't happen while
* we are loading it.
*
* If we are not using log space maps though, metaslab_sync() can
* append to the space map while we are loading. Therefore we load
* only entries that existed when we started the load. Additionally,
* metaslab_sync_done() has to wait for the load to complete because
* there are potential races like metaslab_load() loading parts of the
* space map that are currently being appended by metaslab_sync(). If
* we didn't, the ms_allocatable would have entries that
* metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
* ignoring entries appended by metaslab_sync() that happen after we
* drop the lock.
*/
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
hrtime_t load_start = gethrtime();
metaslab_rt_arg_t *mrap;
if (msp->ms_allocatable->rt_arg == NULL) {
mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
} else {
mrap = msp->ms_allocatable->rt_arg;
msp->ms_allocatable->rt_ops = NULL;
msp->ms_allocatable->rt_arg = NULL;
}
mrap->mra_bt = &msp->ms_allocatable_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
/* Now, populate the size-sorted tree. */
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
struct mssa_arg arg = {0};
arg.rt = msp->ms_allocatable;
arg.mra = mrap;
- range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
- &arg);
+ zfs_range_tree_walk(msp->ms_allocatable,
+ metaslab_size_sorted_add, &arg);
} else {
/*
* Add the size-sorted tree first, since we don't need to load
* the metaslab from the spacemap.
*/
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
/*
* The space map has not been allocated yet, so treat
* all the space in the metaslab as free and add it to the
* ms_allocatable tree.
*/
- range_tree_add(msp->ms_allocatable,
+ zfs_range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
if (msp->ms_new) {
/*
* If the ms_sm doesn't exist, this means that this
* metaslab hasn't gone through metaslab_sync() and
* thus has never been dirtied. So we shouldn't
* expect any unflushed allocs or frees from previous
* TXGs.
*/
- ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
- ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+ ASSERT(zfs_range_tree_is_empty(
+ msp->ms_unflushed_allocs));
+ ASSERT(zfs_range_tree_is_empty(
+ msp->ms_unflushed_frees));
}
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
* changing the ms_sm (or log_sm) and the metaslab's range trees
* while we are about to use them and populate the ms_allocatable.
* The ms_lock is insufficient for this because metaslab_sync() doesn't
* hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
ASSERT(!msp->ms_condensing);
ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
return (error);
}
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
/*
* Apply all the unflushed changes to ms_allocatable right
* away so any manipulations we do below have a clear view
* of what is allocated and what is free.
*/
- range_tree_walk(msp->ms_unflushed_allocs,
- range_tree_remove, msp->ms_allocatable);
- range_tree_walk(msp->ms_unflushed_frees,
- range_tree_add, msp->ms_allocatable);
+ zfs_range_tree_walk(msp->ms_unflushed_allocs,
+ zfs_range_tree_remove, msp->ms_allocatable);
+ zfs_range_tree_walk(msp->ms_unflushed_frees,
+ zfs_range_tree_add, msp->ms_allocatable);
ASSERT3P(msp->ms_group, !=, NULL);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (spa_syncing_log_sm(spa) != NULL) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LOG_SPACEMAP));
/*
* If we use a log space map we add all the segments
* that are in ms_unflushed_frees so they are available
* for allocation.
*
* ms_allocatable needs to contain all free segments
* that are ready for allocations (thus not segments
* from ms_freeing, ms_freed, and the ms_defer trees).
* But if we grab the lock in this code path at a sync
* pass later than 1, then it also contains the
* segments of ms_freed (they were added to it earlier
* in this path through ms_unflushed_frees). So we
* need to remove all the segments that exist in
* ms_freed from ms_allocatable as they will be added
* later in metaslab_sync_done().
*
* When there's no log space map, the ms_allocatable
* correctly doesn't contain any segments that exist
* in ms_freed [see ms_synced_length].
*/
- range_tree_walk(msp->ms_freed,
- range_tree_remove, msp->ms_allocatable);
+ zfs_range_tree_walk(msp->ms_freed,
+ zfs_range_tree_remove, msp->ms_allocatable);
}
/*
* If we are not using the log space map, ms_allocatable
* contains the segments that exist in the ms_defer trees
* [see ms_synced_length]. Thus we need to remove them
* from ms_allocatable as they will be added again in
* metaslab_sync_done().
*
* If we are using the log space map, ms_allocatable still
* contains the segments that exist in the ms_defer trees.
* Not because it read them through the ms_sm though. But
* because these segments are part of ms_unflushed_frees
* whose segments we add to ms_allocatable earlier in this
* code path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
- range_tree_walk(msp->ms_defer[t],
- range_tree_remove, msp->ms_allocatable);
+ zfs_range_tree_walk(msp->ms_defer[t],
+ zfs_range_tree_remove, msp->ms_allocatable);
}
/*
* Call metaslab_recalculate_weight_and_sort() now that the
* metaslab is loaded so we get the metaslab's real weight.
*
* Unless this metaslab was created with older software and
* has not yet been converted to use segment-based weight, we
* expect the new weight to be better or equal to the weight
* that the metaslab had while it was not loaded. This is
* because the old weight does not take into account the
* consolidation of adjacent segments between TXGs. [see
* comment for ms_synchist and ms_deferhist[] for more info]
*/
uint64_t weight = msp->ms_weight;
uint64_t max_size = msp->ms_max_size;
metaslab_recalculate_weight_and_sort(msp);
if (!WEIGHT_IS_SPACEBASED(weight))
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_largest_allocatable(msp);
ASSERT3U(max_size, <=, msp->ms_max_size);
hrtime_t load_end = gethrtime();
msp->ms_load_time = load_end;
zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, smp_length %llu, "
"unflushed_allocs %llu, unflushed_frees %llu, "
"freed %llu, defer %llu + %llu, unloaded time %llu ms, "
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)space_map_length(msp->ms_sm),
- (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
- (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
- (u_longlong_t)range_tree_space(msp->ms_freed),
- (u_longlong_t)range_tree_space(msp->ms_defer[0]),
- (u_longlong_t)range_tree_space(msp->ms_defer[1]),
+ (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_allocs),
+ (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_frees),
+ (u_longlong_t)zfs_range_tree_space(msp->ms_freed),
+ (u_longlong_t)zfs_range_tree_space(msp->ms_defer[0]),
+ (u_longlong_t)zfs_range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
(u_longlong_t)msp->ms_max_size,
(u_longlong_t)msp->ms_max_size - max_size,
(u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
return (0);
}
int
metaslab_load(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* There may be another thread loading the same metaslab; if that's
* the case, just wait until the other thread is done and return.
*/
metaslab_load_wait(msp);
if (msp->ms_loaded)
return (0);
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We set the loading flag BEFORE potentially dropping the lock to
* wait for an ongoing flush (see ms_flushing below). This way other
* threads know that there is already a thread that is loading this
* metaslab.
*/
msp->ms_loading = B_TRUE;
/*
* Wait for any in-progress flushing to finish as we drop the ms_lock
* both here (during space_map_load()) and in metaslab_flush() (when
* we flush our changes to the ms_sm).
*/
if (msp->ms_flushing)
metaslab_flush_wait(msp);
/*
* In case we were waiting for the metaslab to be
* flushed (where we temporarily dropped the ms_lock), ensure that
* no one else loaded the metaslab somehow.
*/
ASSERT(!msp->ms_loaded);
/*
* If we're loading a metaslab in the normal class, consider evicting
* another one to keep our memory usage under the limit defined by the
* zfs_metaslab_mem_limit tunable.
*/
if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
msp->ms_group->mg_class) {
metaslab_potentially_evict(msp->ms_group->mg_class);
}
int error = metaslab_load_impl(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* This can happen if a metaslab is selected for eviction (in
* metaslab_potentially_evict) and then unloaded during spa_sync (via
* metaslab_class_evict_old).
*/
if (!msp->ms_loaded)
return;
- range_tree_vacate(msp->ms_allocatable, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_unload_time = gethrtime();
msp->ms_activation_weight = 0;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
if (msp->ms_group != NULL) {
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)msp->ms_weight,
(u_longlong_t)msp->ms_selected_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_selected_time) / 1000 / 1000,
(u_longlong_t)msp->ms_alloc_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_load_time) / 1000 / 1000,
(u_longlong_t)msp->ms_max_size);
}
/*
* We explicitly recalculate the metaslab's weight based on its space
* map (as it is now not loaded). We want unloaded metaslabs to always
* have their weights calculated from the space map histograms, while
* loaded ones have it calculated from their in-core range tree
* [see metaslab_load()]. This way, the weight reflects the information
* available in-core, whether it is loaded or not.
*
* If ms_group == NULL, we came here from metaslab_fini(),
* at which point it doesn't make sense for us to do the recalculation
* and the sorting.
*/
if (msp->ms_group != NULL)
metaslab_recalculate_weight_and_sort(msp);
}
/*
* We want to optimize the memory use of the per-metaslab range
* trees. To do this, we store the segments in the range trees in
* units of sectors, zero-indexing from the start of the metaslab. If
* vdev_ms_shift minus vdev_ashift is less than 32, we can store
* the ranges using two uint32_ts, rather than two uint64_ts.
*/
-range_seg_type_t
+zfs_range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
uint64_t *start, uint64_t *shift)
{
if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
!zfs_metaslab_force_large_segs) {
*shift = vdev->vdev_ashift;
*start = msp->ms_start;
- return (RANGE_SEG32);
+ return (ZFS_RANGE_SEG32);
} else {
*shift = 0;
*start = 0;
- return (RANGE_SEG64);
+ return (ZFS_RANGE_SEG64);
}
}
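/*
 * Editorial sketch (not part of the ZFS sources): the decision made above.
 * When the metaslab span, expressed in ashift-sized sectors relative to the
 * metaslab start, fits in 32 bits, each segment boundary can be stored as a
 * uint32_t instead of a uint64_t, halving the per-segment footprint of the
 * range trees.  The shift values below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint64_t ms_shift = 34;         /* 16 GiB metaslab */
        uint64_t ashift = 12;           /* 4 KiB sectors */

        if (ms_shift - ashift < 32) {
                printf("span = 2^%llu sectors: 32-bit segments (%zu bytes)\n",
                    (unsigned long long)(ms_shift - ashift),
                    2 * sizeof (uint32_t));
        } else {
                printf("span too wide: 64-bit segments (%zu bytes)\n",
                    2 * sizeof (uint64_t));
        }
        return (0);
}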
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
msp->ms_selected_txg = txg;
msp->ms_selected_time = gethrtime();
multilist_sublist_insert_tail(mls, msp);
multilist_sublist_unlock(mls);
}
void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
multilist_link_init(&ms->ms_class_txg_node);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
ms->ms_allocator = -1;
ms->ms_new = B_TRUE;
vdev_ops_t *ops = vd->vdev_ops;
if (ops->vdev_op_metaslab_init != NULL)
ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
/*
* We only open space map objects that already exist. All others
* will be opened when we finally allocate an object for them. For
* readonly pools there is no need to open the space map object.
*
* Note:
* When called from vdev_expand(), we can't call into the DMU as
* we are holding the spa_config_lock as a writer and we would
* deadlock [see relevant comment in vdev_metaslab_init()]. In
* that case, however, the object parameter is zero, so we won't
* call into the DMU.
*/
if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
!spa->spa_read_spacemaps)) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
}
uint64_t shift, start;
- range_seg_type_t type =
+ zfs_range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
- ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
+ ms->ms_allocatable = zfs_range_tree_create(NULL, type, NULL, start,
+ shift);
for (int t = 0; t < TXG_SIZE; t++) {
- ms->ms_allocating[t] = range_tree_create(NULL, type,
+ ms->ms_allocating[t] = zfs_range_tree_create(NULL, type,
NULL, start, shift);
}
- ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
- ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
+ ms->ms_freeing = zfs_range_tree_create(NULL, type, NULL, start, shift);
+ ms->ms_freed = zfs_range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
- ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
+ ms->ms_defer[t] = zfs_range_tree_create(NULL, type, NULL,
start, shift);
}
ms->ms_checkpointing =
- range_tree_create(NULL, type, NULL, start, shift);
+ zfs_range_tree_create(NULL, type, NULL, start, shift);
ms->ms_unflushed_allocs =
- range_tree_create(NULL, type, NULL, start, shift);
+ zfs_range_tree_create(NULL, type, NULL, start, shift);
metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
- ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
+ ms->ms_unflushed_frees = zfs_range_tree_create(&metaslab_rt_ops,
type, mrap, start, shift);
- ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
+ ms->ms_trim = zfs_range_tree_create(NULL, type, NULL, start, shift);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms, B_FALSE);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL) {
metaslab_sync_done(ms, 0);
metaslab_space_update(vd, mg->mg_class,
metaslab_allocated_space(ms), 0, 0);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
static void
metaslab_fini_flush_data(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (metaslab_unflushed_txg(msp) == 0) {
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
}
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
metaslab_unflushed_dirty(msp));
}
uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
- return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
- range_tree_numsegs(ms->ms_unflushed_frees)) *
+ return ((zfs_range_tree_numsegs(ms->ms_unflushed_allocs) +
+ zfs_range_tree_numsegs(ms->ms_unflushed_frees)) *
ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet, its
* space hasn't been accounted for in its vdev and doesn't need to be
* subtracted.
*/
if (!msp->ms_new) {
metaslab_space_update(vd, mg->mg_class,
-metaslab_allocated_space(msp), 0, -msp->ms_size);
}
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
metaslab_unload(msp);
- range_tree_destroy(msp->ms_allocatable);
- range_tree_destroy(msp->ms_freeing);
- range_tree_destroy(msp->ms_freed);
+ zfs_range_tree_destroy(msp->ms_allocatable);
+ zfs_range_tree_destroy(msp->ms_freeing);
+ zfs_range_tree_destroy(msp->ms_freed);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
- range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
- range_tree_destroy(msp->ms_unflushed_allocs);
- range_tree_destroy(msp->ms_checkpointing);
- range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
- range_tree_destroy(msp->ms_unflushed_frees);
+ zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
+ zfs_range_tree_destroy(msp->ms_unflushed_allocs);
+ zfs_range_tree_destroy(msp->ms_checkpointing);
+ zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
+ zfs_range_tree_destroy(msp->ms_unflushed_frees);
for (int t = 0; t < TXG_SIZE; t++) {
- range_tree_destroy(msp->ms_allocating[t]);
+ zfs_range_tree_destroy(msp->ms_allocating[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
- range_tree_destroy(msp->ms_defer[t]);
+ zfs_range_tree_destroy(msp->ms_defer[t]);
}
ASSERT0(msp->ms_deferspace);
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
- range_tree_vacate(msp->ms_trim, NULL, NULL);
- range_tree_destroy(msp->ms_trim);
+ zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
+ zfs_range_tree_destroy(msp->ms_trim);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
kmem_free(msp, sizeof (metaslab_t));
}
-#define FRAGMENTATION_TABLE_SIZE 17
-
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
- * large segments. A 10% change in fragmentation equates to approximately
- * double the number of segments.
+ * large segments.
*
- * This table defines 0% fragmented space using 16MB segments. Testing has
- * shown that segments that are greater than or equal to 16MB do not suffer
- * from drastic performance problems. Using this value, we derive the rest
- * of the table. Since the fragmentation value is never stored on disk, it
- * is possible to change these calculations in the future.
+ * This table defines 0% fragmented space using 512M segments. Using this
+ * value, we derive the rest of the table. This table originally went up to
+ * 16MB, but with larger recordsizes, larger ashifts, and use of raidz3, it
+ * is possible to have significantly larger allocations than were previously
+ * possible. Since the fragmentation value is never stored on disk, it is
+ * possible to change these calculations in the future.
*/
-static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
+static const int zfs_frag_table[] = {
100, /* 512B */
- 100, /* 1K */
- 98, /* 2K */
- 95, /* 4K */
- 90, /* 8K */
- 80, /* 16K */
- 70, /* 32K */
- 60, /* 64K */
- 50, /* 128K */
- 40, /* 256K */
- 30, /* 512K */
- 20, /* 1M */
- 15, /* 2M */
- 10, /* 4M */
- 5, /* 8M */
- 0 /* 16M */
+ 99, /* 1K */
+ 97, /* 2K */
+ 93, /* 4K */
+ 88, /* 8K */
+ 83, /* 16K */
+ 77, /* 32K */
+ 71, /* 64K */
+ 64, /* 128K */
+ 57, /* 256K */
+ 50, /* 512K */
+ 43, /* 1M */
+ 36, /* 2M */
+ 29, /* 4M */
+ 23, /* 8M */
+ 17, /* 16M */
+ 12, /* 32M */
+ 7, /* 64M */
+ 3, /* 128M */
+ 1, /* 256M */
+ 0, /* 512M */
};
+#define FRAGMENTATION_TABLE_SIZE \
+ (sizeof (zfs_frag_table)/(sizeof (zfs_frag_table[0])))
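/*
 * Editorial sketch (not part of the ZFS sources): applying the table above
 * to a free-space histogram.  Each size class's free space is weighted by
 * its table entry and the weighted sum is divided by the total free space,
 * as described in the comment preceding the table.  The histogram contents
 * here are invented.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        static const int frag_table[] = {
                100, 99, 97, 93, 88, 83, 77, 71, 64, 57,
                50, 43, 36, 29, 23, 17, 12, 7, 3, 1, 0,
        };
        /* Hypothetical free space (bytes) per power-of-two size class. */
        uint64_t space[21] = { 0 };
        uint64_t weighted = 0, total = 0;

        space[3] = 64ULL << 20;         /* 64 MiB worth of 4K segments */
        space[14] = 64ULL << 20;        /* 64 MiB worth of 8M segments */

        for (int i = 0; i < 21; i++) {
                weighted += space[i] * frag_table[i];
                total += space[i];
        }
        /* prints 58%: halfway between the 4K (93) and 8M (23) weights */
        printf("fragmentation: %llu%%\n",
            (unsigned long long)(total != 0 ? weighted / total : 0));
        return (0);
}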
/*
* Calculate the metaslab's fragmentation metric and set ms_fragmentation.
* Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
* been upgraded and does not support this metric. Otherwise, the return
* value should be in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported. We also skip marking this metaslab for
* condensing if the caller has explicitly set nodirty.
*/
if (!nodirty &&
spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
(u_longlong_t)msp->ms_id,
(u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - metaslab_allocated_space(msp);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
* has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
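The LBA weighting comment above can be illustrated with a toy calculation (not from the patch; the free-space and metaslab-count values are hypothetical): the formula scales the weight from 2x for the outermost (lowest-id) metaslab down toward 1x for the innermost one.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t space = 1000;		/* hypothetical free space per metaslab */
	uint64_t ms_count = 4;		/* hypothetical vdev_ms_count */

	/* Same arithmetic as the rotational-vdev branch above. */
	for (uint64_t ms_id = 0; ms_id < ms_count; ms_id++) {
		uint64_t weight = 2 * space - (ms_id * space) / ms_count;
		printf("ms_id %llu -> weight %llu\n",
		    (unsigned long long)ms_id, (unsigned long long)weight);
	}
	return (0);	/* prints 2000, 1750, 1500, 1250 */
}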
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
- for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
+ for (int i = ZFS_RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
segments <<= 1;
segments += msp->ms_allocatable->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Calculate the weight based on the on-disk histogram. Should be applied
* only to unloaded metaslabs (i.e. no incoming allocations) in order to
* give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(!msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(space_map_object(sm), !=, 0);
ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* Create a joint histogram from all the segments that have made
* it to the metaslab's space map histogram but are not yet
* available for allocation because they are still in the freeing
* pipeline (e.g. freeing, freed, and defer trees). Then subtract
* these segments from the space map's histogram to get a more
* accurate weight.
*/
uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
deferspace_histogram[i] += msp->ms_synchist[i];
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
deferspace_histogram[i] += msp->ms_deferhist[t][i];
}
}
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
deferspace_histogram[i]);
uint64_t count =
sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
if (count != 0) {
WEIGHT_SET_COUNT(weight, count);
WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
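A rough sketch of the bucket selection performed above, with made-up histograms; the WEIGHT_SET_* bit packing is intentionally left out, and only the choice of count and index is shown.

#include <stdio.h>
#include <stdint.h>

#define	HIST_SIZE	8	/* stand-in for SPACE_MAP_HISTOGRAM_SIZE */

int
main(void)
{
	/* Hypothetical on-disk histogram and freeing-pipeline histogram. */
	uint64_t smp_histogram[HIST_SIZE] = { 5, 9, 4, 0, 3, 0, 0, 0 };
	uint64_t defer_histogram[HIST_SIZE] = { 1, 2, 0, 0, 3, 0, 0, 0 };
	uint64_t sm_shift = 9;

	/* Highest bucket that is still non-empty after the subtraction wins. */
	for (int i = HIST_SIZE - 1; i >= 0; i--) {
		uint64_t count = smp_histogram[i] - defer_histogram[i];
		if (count != 0) {
			/* Prints "count 4 at index 11" for these inputs. */
			printf("count %llu at index %llu\n",
			    (unsigned long long)count,
			    (unsigned long long)(i + sm_shift));
			break;
		}
	}
	return (0);
}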
/*
* Compute a segment-based weight for the specified metaslab. The weight
* is determined by the highest bucket in the histogram. The information
* for the highest bucket is encoded into the weight value.
*/
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (metaslab_allocated_space(msp) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (metaslab_allocated_space(msp) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab is loaded, then we can determine if the desired allocation
* can be satisfied by looking at the size of the maximum free segment
* on that metaslab. Otherwise, we make our decision based on the metaslab's
* weight. For segment-based weighting we can determine the maximum
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
* This case will usually but not always get caught by the checks below;
* metaslabs can be loaded by various means, including the trim and
* initialize code. Once that happens, without this check they are
* allocatable even before they finish their first txg sync.
*/
if (unlikely(msp->ms_new))
return (B_FALSE);
/*
* If the metaslab is loaded, ms_max_size is definitive and we can use
* the fast check. If it's not, the ms_max_size is a lower bound (once
* set), and we should use the fast check as long as we're not in
* try_hard and it's been less than zfs_metaslab_max_size_cache_sec
* seconds since the metaslab was unloaded.
*/
if (msp->ms_loaded ||
(msp->ms_max_size != 0 && !try_hard && gethrtime() <
msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
return (msp->ms_max_size >= asize);
boolean_t should_allocate;
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
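For the segment-based branch above, the check reduces to a power-of-two comparison. A standalone sketch follows; the helper name and values are hypothetical, and the real code reads the index with WEIGHT_GET_INDEX().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the segment-based check above. */
static bool
should_allocate_segment_based(int weight_index, uint64_t asize)
{
	/* A weight with index i advertises segments in [2^i, 2^(i+1)). */
	return (asize < (1ULL << (weight_index + 1)));
}

int
main(void)
{
	/* Index 17 advertises segments in [128K, 256K). */
	printf("%d\n", should_allocate_segment_based(17, 200 * 1024)); /* 1 */
	printf("%d\n", should_allocate_segment_based(17, 300 * 1024)); /* 0 */
	return (0);
}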
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_set_fragmentation(msp, nodirty);
/*
* Update the maximum size. If the metaslab is loaded, this will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree. If the metaslab is
* unloaded, we check if there's a larger free segment in the
* unflushed frees. This is a lower bound on the largest allocatable
* segment size. Coalescing of adjacent entries may reveal larger
* allocatable segments, but we aren't aware of those until loading
* the space map into a range tree.
*/
if (msp->ms_loaded) {
msp->ms_max_size = metaslab_largest_allocatable(msp);
} else {
msp->ms_max_size = MAX(msp->ms_max_size,
metaslab_largest_unflushed_free(msp));
}
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/* note: we preserve the mask (e.g. indication of primary, etc.) */
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(msp->ms_group, msp,
metaslab_weight(msp, B_FALSE) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
int allocator, uint64_t activation_weight)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're activating for the claim code, we don't want to actually
* set the metaslab up for a specific allocator.
*/
if (activation_weight == METASLAB_WEIGHT_CLAIM) {
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(mg, msp, msp->ms_weight |
activation_weight);
return (0);
}
metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
&mga->mga_primary : &mga->mga_secondary);
mutex_enter(&mg->mg_lock);
if (*mspp != NULL) {
mutex_exit(&mg->mg_lock);
return (EEXIST);
}
*mspp = msp;
ASSERT3S(msp->ms_allocator, ==, -1);
msp->ms_allocator = allocator;
msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort_impl(mg, msp,
msp->ms_weight | activation_weight);
mutex_exit(&mg->mg_lock);
return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The current metaslab is already activated for us so there
* is nothing to do. Being activated, though, doesn't mean that
* this metaslab is activated for our allocator or with our
* requested activation weight. The metaslab could have started
* as an active one for our allocator but changed allocators
* while we were waiting to grab its ms_lock or we stole it
* [see find_valid_metaslab()]. This means that there is a
* possibility of passivating a metaslab of another allocator
* or from a different activation mask, from this thread.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
ASSERT(msp->ms_loaded);
return (0);
}
int error = metaslab_load(msp);
if (error != 0) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
/*
* When entering metaslab_load() we may have dropped the
* ms_lock because we were loading this metaslab, or we
* were waiting for another thread to load it for us. In
* that scenario, we recheck the weight of the metaslab
* to see if it was activated by another thread.
*
* If the metaslab was activated for another allocator or
* it was activated with a different activation weight (e.g.
* we wanted to make it a primary but it was activated as
* secondary) we return error (EBUSY).
*
* If the metaslab was activated for the same allocator
* and requested activation mask, skip activating it.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
if (msp->ms_allocator != allocator)
return (EBUSY);
if ((msp->ms_weight & activation_weight) == 0)
return (SET_ERROR(EBUSY));
EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
return (0);
}
/*
* If the metaslab has literally 0 space, it will have weight 0. In
* that case, don't bother activating it. This can happen if the
* metaslab had space during find_valid_metaslab, but another thread
* loaded it and used all that space while we were waiting to grab the
* lock.
*/
if (msp->ms_weight == 0) {
- ASSERT0(range_tree_space(msp->ms_allocatable));
+ ASSERT0(zfs_range_tree_space(msp->ms_allocatable));
return (SET_ERROR(ENOSPC));
}
if ((error = metaslab_activate_allocator(msp->ms_group, msp,
allocator, activation_weight)) != 0) {
return (error);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
metaslab_group_sort(mg, msp, weight);
return;
}
mutex_enter(&mg->mg_lock);
ASSERT3P(msp->ms_group, ==, mg);
ASSERT3S(0, <=, msp->ms_allocator);
ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
if (msp->ms_primary) {
ASSERT3P(mga->mga_primary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
mga->mga_primary = NULL;
} else {
ASSERT3P(mga->mga_secondary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
mga->mga_secondary = NULL;
}
msp->ms_allocator = -1;
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
- range_tree_space(msp->ms_allocatable) == 0);
+ zfs_range_tree_space(msp->ms_allocatable) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
ASSERT(msp->ms_activation_weight != 0);
msp->ms_activation_weight = 0;
metaslab_passivate_allocator(msp->ms_group, msp, weight);
ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more blocks and cause more sync passes.
*/
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
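The switch-threshold test above compares histogram indices; a toy run with assumed values (the threshold and the indices below are illustrative, not the shipped module defaults):

#include <stdio.h>

int
main(void)
{
	int switch_threshold = 2;	/* example value for zfs_metaslab_switch_threshold */
	int activation_idx = 20;	/* histogram index when the metaslab was activated */
	int current_idx = 17;		/* index derived from the in-core range tree now */

	/* Same condition as metaslab_segment_may_passivate(). */
	if (current_idx <= activation_idx - switch_threshold)
		printf("passivate: dropped %d buckets since activation\n",
		    activation_idx - current_idx);
	return (0);
}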
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
metaslab_class_t *mc = msp->ms_group->mg_class;
spa_t *spa = mc->mc_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
mutex_enter(&msp->ms_lock);
(void) metaslab_load(msp);
metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled)
return;
mutex_enter(&mg->mg_lock);
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
ASSERT3P(msp->ms_group, ==, mg);
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload,
msp, TQ_SLEEP | (m <= mg->mg_allocators ? TQ_FRONT : 0))
!= TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance for
* inefficiency. We would like to use the following criteria to make our
* decision:
*
* 1. Do not condense if the size of the space map object would dramatically
* increase as a result of writing out the free space range tree.
*
* 2. Condense if the on-disk space map representation is at least
* zfs_condense_pct/100 times the size of the optimal representation
* (e.g. with zfs_condense_pct = 110 and an optimal in-core size of 1MB,
* condense once the on-disk size reaches 1.1MB).
*
* 3. Do not condense if the on-disk size of the space map does not actually
* decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
* which a condense request has been made.
*/
- if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
+ if (zfs_range_tree_numsegs(msp->ms_allocatable) == 0 ||
msp->ms_condense_wanted)
return (B_TRUE);
uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
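A worked instance of the final return expression above, with hypothetical sizes (these numbers are not the module defaults): a 1300 KB on-disk space map whose estimated optimal size is 1000 KB passes both the percentage test and the block-count test, so it would be condensed.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int
main(void)
{
	uint64_t condense_pct = 110;		/* example zfs_condense_pct */
	uint64_t block_threshold = 100;		/* example block-count threshold */
	uint64_t record_size = 4096;		/* MAX(sm_blksz, vdev_blocksize) */
	uint64_t object_size = 1300 * 1024;	/* current on-disk length */
	uint64_t optimal_size = 1000 * 1024;	/* estimated minimal length */

	/* Same shape as the return statement in metaslab_should_condense(). */
	bool condense =
	    object_size >= (optimal_size * condense_pct / 100) &&
	    object_size > block_threshold * record_size;
	printf("condense: %s\n", condense ? "yes" : "no");	/* "yes" */
	return (0);
}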
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed
* by the entries of the free range tree (ms_allocatable). The condensed
* spacemap contains all the entries of previous TXGs (including those in
* the pool-wide log spacemaps; thus this is effectively a superset of
* metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
- range_tree_t *condense_tree;
+ zfs_range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_sm != NULL);
/*
* In order to condense the space map, we need to change it so it
* only describes which segments are currently allocated and free.
*
* All the current free space resides in the ms_allocatable, all
* the ms_defer trees, and all the ms_allocating trees. We ignore
* ms_freed because it is empty because we're in sync pass 1. We
* ignore ms_freeing because these changes are not yet reflected
* in the spacemap (they will be written later this txg).
*
* So to truncate the space map to represent all the entries of
* previous TXGs we do the following:
*
* 1] We create a range tree (condense tree) that is 100% empty.
* 2] We add to it all segments found in the ms_defer trees
* as those segments are marked as free in the original space
* map. We do the same with the ms_allocating trees for the same
* reason. Adding these segments should be a relatively
* inexpensive operation since we expect these trees to have a
* small number of nodes.
* 3] We vacate any unflushed allocs, since they are not frees we
* need to add to the condense tree. Then we vacate any
* unflushed frees as they should already be part of ms_allocatable.
* 4] At this point, we would ideally like to add all segments
* in the ms_allocatable tree to the condense tree. This way
* we would write all the entries of the condense tree as the
* condensed space map, which would only contain freed
* segments with everything else assumed to be allocated.
*
* Doing so can be prohibitively expensive as ms_allocatable can
* be large, and therefore computationally expensive to add to
* the condense_tree. Instead we first sync out an entry marking
* everything as allocated, then the condense_tree and then the
* ms_allocatable, in the condensed space map. While this is not
* optimal, it is typically close to optimal and more importantly
* much cheaper to compute.
*
* 5] Finally, as both of the unflushed trees were written to our
* new and condensed metaslab space map, we basically flushed
* all the unflushed changes to disk, thus we call
* metaslab_flush_update().
*/
ASSERT3U(spa_sync_pass(spa), ==, 1);
- ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
+ ASSERT(zfs_range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %llu, forcing condense=%s",
(u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
- (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
+ (u_longlong_t)zfs_range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
- range_seg_type_t type;
+ zfs_range_seg_type_t type;
uint64_t shift, start;
type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
&start, &shift);
- condense_tree = range_tree_create(NULL, type, NULL, start, shift);
+ condense_tree = zfs_range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
- range_tree_walk(msp->ms_defer[t],
- range_tree_add, condense_tree);
+ zfs_range_tree_walk(msp->ms_defer[t],
+ zfs_range_tree_add, condense_tree);
}
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
- range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
- range_tree_add, condense_tree);
+ zfs_range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
+ zfs_range_tree_add, condense_tree);
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
- range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
- range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
/*
* We're about to drop the metaslab's lock thus allowing other
* consumers to change its content. Set the metaslab's ms_condensing
* flag to ensure that allocations on this metaslab do not occur
* while we're in the middle of committing it to disk. This is only
* critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
uint64_t object = space_map_object(msp->ms_sm);
space_map_truncate(sm,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
/*
* space_map_truncate() may have reallocated the spacemap object.
* If so, update the vdev_ms_array.
*/
if (space_map_object(msp->ms_sm) != object) {
object = space_map_object(msp->ms_sm);
dmu_write(spa->spa_meta_objset,
msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
/*
* Note:
* When the log space map feature is enabled, each space map will
* always have ALLOCS followed by FREES for each sync pass. This is
* typically true even when the log space map feature is disabled,
* except from the case where a metaslab goes through metaslab_sync()
* and gets condensed. In that case the metaslab's space map will have
* ALLOCS followed by FREES (due to condensing) followed by ALLOCS
* followed by FREES (due to space_map_write() in metaslab_sync()) for
* sync pass 1.
*/
- range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
- shift);
- range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
+ zfs_range_tree_t *tmp_tree = zfs_range_tree_create(NULL, type, NULL,
+ start, shift);
+ zfs_range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
- range_tree_vacate(condense_tree, NULL, NULL);
- range_tree_destroy(condense_tree);
- range_tree_vacate(tmp_tree, NULL, NULL);
- range_tree_destroy(tmp_tree);
+ zfs_range_tree_vacate(condense_tree, NULL, NULL);
+ zfs_range_tree_destroy(condense_tree);
+ zfs_range_tree_vacate(tmp_tree, NULL, NULL);
+ zfs_range_tree_destroy(tmp_tree);
mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
metaslab_flush_update(msp, tx);
}
static void
metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
- ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
- ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
mutex_enter(&spa->spa_flushed_ms_lock);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, B_TRUE);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_increment_current_mscount(spa);
spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
}
void
metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
- ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
- ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
/* update metaslab's position in our flushing tree */
uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, dirty);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
/* update metaslab counts of spa_log_sm_t nodes */
spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_sm_increment_current_mscount(spa);
/* update log space map summary */
spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
ms_prev_flushed_dirty);
spa_log_summary_add_flushed_metaslab(spa, dirty);
/* cleanup obsolete logs if any */
spa_cleanup_old_sm_logs(spa, tx);
}
/*
* Called when the metaslab has been flushed (its own spacemap now reflects
* all the contents of the pool-wide spacemap log). Updates the metaslab's
* metadata and any pool-wide related log space map data (e.g. summary,
* obsolete logs, etc..) to reflect that.
*/
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
/*
* Just because a metaslab got flushed, that doesn't mean that
* it will pass through metaslab_sync_done(). Thus, make sure to
* update ms_synced_length here in case it doesn't.
*/
msp->ms_synced_length = space_map_length(msp->ms_sm);
/*
* We may end up here from metaslab_condense() without the
* feature being active. In that case this is a no-op.
*/
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
metaslab_unflushed_txg(msp) == 0)
return;
metaslab_unflushed_bump(msp, tx, B_FALSE);
}
boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
/*
* There is nothing wrong with flushing the same metaslab twice, as
* this codepath should handle that case. However, the current
* flushing scheme makes sure to avoid this situation as we would be
* making all these calls without having anything meaningful to write
* to disk. We assert this behavior here.
*/
ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
/*
* We can not flush while loading, because then we would
* not load the ms_unflushed_{allocs,frees}.
*/
if (msp->ms_loading)
return (B_FALSE);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
/*
* Metaslab condensing is effectively flushing. Therefore if the
* metaslab can be condensed we can just condense it instead of
* flushing it.
*
* Note that metaslab_condense() does call metaslab_flush_update()
* so we can just return immediately after condensing. We also
* don't need to care about setting ms_flushing or broadcasting
* ms_flush_cv, even if we temporarily drop the ms_lock in
* metaslab_condense(), as the metaslab is already loaded.
*/
if (msp->ms_loaded && metaslab_should_condense(msp)) {
metaslab_group_t *mg = msp->ms_group;
/*
* For all histogram operations below refer to the
* comments of metaslab_sync() where we follow a
* similar procedure.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
metaslab_condense(msp, tx);
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
- ASSERT(range_tree_is_empty(msp->ms_freed));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_freed));
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
/*
* Since we recreated the histogram (and potentially
* the ms_sm too while condensing) ensure that the
* weight is updated too because we are not guaranteed
* that this metaslab is dirty and will go through
* metaslab_sync_done().
*/
metaslab_recalculate_weight_and_sort(msp);
return (B_TRUE);
}
msp->ms_flushing = B_TRUE;
uint64_t sm_len_before = space_map_length(msp->ms_sm);
mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
uint64_t sm_len_after = space_map_length(msp->ms_sm);
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
"appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
- (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
- (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
+ (u_longlong_t)zfs_range_tree_space(
+ msp->ms_unflushed_allocs),
+ (u_longlong_t)zfs_range_tree_space(
+ msp->ms_unflushed_frees),
(u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
- range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
- range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
metaslab_flush_update(msp, tx);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
msp->ms_flushing = B_FALSE;
cv_broadcast(&msp->ms_flush_cv);
return (B_TRUE);
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
- range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
+ zfs_range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_new) {
- ASSERT0(range_tree_space(alloctree));
- ASSERT0(range_tree_space(msp->ms_freeing));
- ASSERT0(range_tree_space(msp->ms_freed));
- ASSERT0(range_tree_space(msp->ms_checkpointing));
- ASSERT0(range_tree_space(msp->ms_trim));
+ ASSERT0(zfs_range_tree_space(alloctree));
+ ASSERT0(zfs_range_tree_space(msp->ms_freeing));
+ ASSERT0(zfs_range_tree_space(msp->ms_freed));
+ ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
+ ASSERT0(zfs_range_tree_space(msp->ms_trim));
return;
}
/*
* Normally, we don't want to process a metaslab if there are no
* allocations or frees to perform. However, if the metaslab is being
* forced to condense, it's loaded and we're not beyond the final
* dirty txg, we need to let it through. Not condensing beyond the
* final dirty txg prevents an issue where metaslabs that need to be
* condensed but were loaded for other reasons could cause a panic
* here. By only checking the txg in that branch of the conditional,
* we preserve the utility of the VERIFY statements in all other
* cases.
*/
- if (range_tree_is_empty(alloctree) &&
- range_tree_is_empty(msp->ms_freeing) &&
- range_tree_is_empty(msp->ms_checkpointing) &&
+ if (zfs_range_tree_is_empty(alloctree) &&
+ zfs_range_tree_is_empty(msp->ms_freeing) &&
+ zfs_range_tree_is_empty(msp->ms_checkpointing) &&
!(msp->ms_loaded && msp->ms_condense_wanted &&
txg <= spa_final_dirty_txg(spa)))
return;
VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently
* with metaslab_sync() is the metaslab's ms_allocatable. No
* other thread can be modifying this txg's alloc, freeing,
* freed, or space_map_phys_t. We drop ms_lock whenever we
* could call into the DMU, because the DMU can call down to
* us (e.g. via zio_free()) at any time.
*
* The spa_vdev_remove_thread() can be reading metaslab state
* concurrently, and it is locked out by the ms_sync_lock.
* Note that the ms_lock is insufficient for this, because it
* is dropped by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
/*
* Generate a log space map if one doesn't exist already.
*/
spa_generate_syncing_log_sm(spa, tx);
if (msp->ms_sm == NULL) {
uint64_t new_object = space_map_alloc(mos,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log :
zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &new_object, tx);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
- ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
- ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
+ ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
- if (!range_tree_is_empty(msp->ms_checkpointing) &&
+ if (!zfs_range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* We save the space map object as an entry in vdev_top_zap
* so it can be retrieved when the pool is reopened after an
* export or through zdb.
*/
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (new_object), 1, &new_object, tx));
}
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
metaslab_should_condense(msp))
metaslab_condense(msp, tx);
/*
* We'll be going to disk to sync our space accounting, thus we
* drop the ms_lock during that time so allocations coming from
* open-context (ZIL) for future TXGs do not block.
*/
mutex_exit(&msp->ms_lock);
space_map_t *log_sm = spa_syncing_log_sm(spa);
if (log_sm != NULL) {
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
if (metaslab_unflushed_txg(msp) == 0)
metaslab_unflushed_add(msp, tx);
else if (!metaslab_unflushed_dirty(msp))
metaslab_unflushed_bump(msp, tx, B_TRUE);
space_map_write(log_sm, alloctree, SM_ALLOC,
vd->vdev_id, tx);
space_map_write(log_sm, msp->ms_freeing, SM_FREE,
vd->vdev_id, tx);
mutex_enter(&msp->ms_lock);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
- range_tree_remove_xor_add(alloctree,
+ zfs_range_tree_remove_xor_add(alloctree,
msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
- range_tree_remove_xor_add(msp->ms_freeing,
+ zfs_range_tree_remove_xor_add(msp->ms_freeing,
msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(msp);
} else {
ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
}
- msp->ms_allocated_space += range_tree_space(alloctree);
+ msp->ms_allocated_space += zfs_range_tree_space(alloctree);
ASSERT3U(msp->ms_allocated_space, >=,
- range_tree_space(msp->ms_freeing));
- msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
+ zfs_range_tree_space(msp->ms_freeing));
+ msp->ms_allocated_space -= zfs_range_tree_space(msp->ms_freeing);
- if (!range_tree_is_empty(msp->ms_checkpointing)) {
+ if (!zfs_range_tree_is_empty(msp->ms_checkpointing)) {
ASSERT(spa_has_checkpoint(spa));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
* ms_lock while writing to the checkpoint space map, for the
* same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
spa->spa_checkpoint_info.sci_dspace +=
- range_tree_space(msp->ms_checkpointing);
+ zfs_range_tree_space(msp->ms_checkpointing);
vd->vdev_stat.vs_checkpoint_space +=
- range_tree_space(msp->ms_checkpointing);
+ zfs_range_tree_space(msp->ms_checkpointing);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
-space_map_allocated(vd->vdev_checkpoint_sm));
- range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
}
if (msp->ms_loaded) {
/*
* When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
*
* Keep in mind that even if we are currently using a log spacemap
* we want current frees to end up in the ms_allocatable (but not
* get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
- range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
+ zfs_range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
ASSERT0(msp->ms_allocated_this_txg);
} else {
- range_tree_vacate(msp->ms_freeing,
- range_tree_add, msp->ms_freed);
+ zfs_range_tree_vacate(msp->ms_freeing,
+ zfs_range_tree_add, msp->ms_freed);
}
- msp->ms_allocated_this_txg += range_tree_space(alloctree);
- range_tree_vacate(alloctree, NULL, NULL);
+ msp->ms_allocated_this_txg += zfs_range_tree_space(alloctree);
+ zfs_range_tree_vacate(alloctree, NULL, NULL);
- ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
- ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
+ ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
+ ASSERT0(zfs_range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
& TXG_MASK]));
- ASSERT0(range_tree_space(msp->ms_freeing));
- ASSERT0(range_tree_space(msp->ms_checkpointing));
+ ASSERT0(zfs_range_tree_space(msp->ms_freeing));
+ ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
mutex_exit(&msp->ms_lock);
/*
* Verify that the space map object ID has been recorded in the
* vdev_ms_array.
*/
uint64_t object;
VERIFY0(dmu_read(mos, vd->vdev_ms_array,
msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
VERIFY3U(object, ==, space_map_object(msp->ms_sm));
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
if (!msp->ms_loaded || msp->ms_disabled != 0)
return;
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
- VERIFY0(range_tree_space(
+ VERIFY0(zfs_range_tree_space(
msp->ms_allocating[(txg + t) & TXG_MASK]));
}
if (msp->ms_allocator != -1)
metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
- range_tree_t **defer_tree;
+ zfs_range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
if (msp->ms_new) {
/* this is a new metaslab, add its capacity to the vdev */
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
/* there should be no allocations nor frees at this point */
VERIFY0(msp->ms_allocated_this_txg);
- VERIFY0(range_tree_space(msp->ms_freed));
+ VERIFY0(zfs_range_tree_space(msp->ms_freed));
}
- ASSERT0(range_tree_space(msp->ms_freeing));
- ASSERT0(range_tree_space(msp->ms_checkpointing));
+ ASSERT0(zfs_range_tree_space(msp->ms_freeing));
+ ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing ||
vd->vdev_rz_expanding) {
defer_allowed = B_FALSE;
}
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
- range_tree_space(msp->ms_freed);
+ zfs_range_tree_space(msp->ms_freed);
if (defer_allowed) {
- defer_delta = range_tree_space(msp->ms_freed) -
- range_tree_space(*defer_tree);
+ defer_delta = zfs_range_tree_space(msp->ms_freed) -
+ zfs_range_tree_space(*defer_tree);
} else {
- defer_delta -= range_tree_space(*defer_tree);
+ defer_delta -= zfs_range_tree_space(*defer_tree);
}
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
if (spa_syncing_log_sm(spa) == NULL) {
/*
* If there's a metaslab_load() in progress and we don't have
* a log space map, it means that we probably wrote to the
* metaslab's space map. If this is the case, we need to
* make sure that we wait for the load to complete so that we
* have a consistent view at the in-core side of the metaslab.
*/
metaslab_load_wait(msp);
} else {
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
}
/*
* When auto-trimming is enabled, free ranges which are added to
* ms_allocatable are also added to ms_trim. The ms_trim tree is
* periodically consumed by the vdev_autotrim_thread() which issues
* trims for all ranges and then vacates the tree. The ms_trim tree
* can be discarded at any time with the sole consequence of recent
* frees not being trimmed.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
- range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
+ zfs_range_tree_walk(*defer_tree, zfs_range_tree_add,
+ msp->ms_trim);
if (!defer_allowed) {
- range_tree_walk(msp->ms_freed, range_tree_add,
+ zfs_range_tree_walk(msp->ms_freed, zfs_range_tree_add,
msp->ms_trim);
}
} else {
- range_tree_vacate(msp->ms_trim, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
}
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
* just emptied out the defer_tree.
*/
- range_tree_vacate(*defer_tree,
- msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
+ zfs_range_tree_vacate(*defer_tree,
+ msp->ms_loaded ? zfs_range_tree_add : NULL, msp->ms_allocatable);
if (defer_allowed) {
- range_tree_swap(&msp->ms_freed, defer_tree);
+ zfs_range_tree_swap(&msp->ms_freed, defer_tree);
} else {
- range_tree_vacate(msp->ms_freed,
- msp->ms_loaded ? range_tree_add : NULL,
+ zfs_range_tree_vacate(msp->ms_freed,
+ msp->ms_loaded ? zfs_range_tree_add : NULL,
msp->ms_allocatable);
}
msp->ms_synced_length = space_map_length(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
metaslab_aux_histograms_update_done(msp, defer_allowed);
if (msp->ms_new) {
msp->ms_new = B_FALSE;
mutex_enter(&mg->mg_lock);
mg->mg_ms_ready++;
mutex_exit(&mg->mg_lock);
}
/*
* Re-sort metaslab within its group now that we've adjusted
* its allocatable space.
*/
metaslab_recalculate_weight_and_sort(msp);
- ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
- ASSERT0(range_tree_space(msp->ms_freeing));
- ASSERT0(range_tree_space(msp->ms_freed));
- ASSERT0(range_tree_space(msp->ms_checkpointing));
+ ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
+ ASSERT0(zfs_range_tree_space(msp->ms_freeing));
+ ASSERT0(zfs_range_tree_space(msp->ms_freed));
+ ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
msp->ms_allocating_total -= msp->ms_allocated_this_txg;
msp->ms_allocated_this_txg = 0;
mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
- metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
+ metaslab_group_alloc_update(mg);
/*
* Preload the next potential metaslabs but only on active
* metaslab groups. We can get into a state where the metaslab
* is no longer active since we dirty metaslabs as we remove a
* device, thus potentially making the metaslab group eligible
* for preloading.
*/
if (mg->mg_activation_count > 0) {
metaslab_group_preload(mg);
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
* When writing a ditto block (i.e. more than one DVA for a given BP) on
* the same vdev as an existing DVA of this BP, then try to allocate it
* on a different metaslab than existing DVAs (i.e. a unique metaslab).
*/
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
uint64_t dva_ms_id;
if (DVA_GET_ASIZE(dva) == 0)
return (B_TRUE);
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (B_TRUE);
dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
return (msp->ms_id != dva_ms_id);
}
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
int allocator)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
* entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
mat->mat_allocator = allocator;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}
static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
metaslab_class_allocator_t *mca =
&mg->mg_class->mc_allocator[allocator];
uint64_t max = mg->mg_max_alloc_queue_depth;
uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
while (cur < max) {
if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
cur, cur + 1) == cur) {
atomic_inc_64(&mca->mca_alloc_max_slots);
return;
}
cur = mga->mga_cur_max_alloc_queue_depth;
}
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator, boolean_t io_complete)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
int allocator)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
- range_tree_t *rt = msp->ms_allocatable;
+ zfs_range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
VERIFY0(msp->ms_new);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
- VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
- range_tree_remove(rt, start, size);
- range_tree_clear(msp->ms_trim, start, size);
+ VERIFY3U(zfs_range_tree_space(rt) - size, <=, msp->ms_size);
+ zfs_range_tree_remove(rt, start, size);
+ zfs_range_tree_clear(msp->ms_trim, start, size);
- if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
+ if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
- range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
+ zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], start,
+ size);
msp->ms_allocating_total += size;
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_largest_allocatable(msp);
return (start);
}
/*
* Find the metaslab with the highest weight that is less than what we've
* already tried. In the common case, this means that we will examine each
* metaslab at most once. Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock. If a metaslab is
* activated by another thread, and we fail to allocate from the metaslab we
* have selected, we may not try the newly-activated metaslab, and instead
* activate another metaslab. This is not optimal, but generally does not cause
* any problems (a possible exception being if every metaslab is completely full
* except for the newly-activated metaslab which we fail to examine).
*/
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
boolean_t *was_active)
{
avl_index_t idx;
avl_tree_t *t = &mg->mg_metaslab_tree;
metaslab_t *msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
uint_t tries = 0;
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
int i;
if (!try_hard && tries > zfs_metaslab_find_max_tries) {
METASLABSTAT_BUMP(metaslabstat_too_many_tries);
return (NULL);
}
tries++;
if (!metaslab_should_allocate(msp, asize, try_hard)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
continue;
}
/*
* If the selected metaslab is condensing or disabled, or
* hasn't gone through a metaslab_sync_done(), then skip it.
*/
if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new)
continue;
*was_active = msp->ms_allocator != -1;
/*
* If we're activating as primary, this is our first allocation
* from this disk, so we don't need to check how close we are.
* If the metaslab under consideration was already active,
* we're getting desperate enough to steal another allocator's
* metaslab, so we still don't care about distances.
*/
if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
break;
for (i = 0; i < d; i++) {
if (want_unique &&
!metaslab_is_unique(msp, &dva[i]))
break; /* try another metaslab */
}
if (i == d)
break;
}
if (msp != NULL) {
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
search->ms_allocator = msp->ms_allocator;
search->ms_primary = msp->ms_primary;
}
return (msp);
}
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
return;
if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(!msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY3S(msp->ms_allocator, ==, -1);
return;
}
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
for (int i = 0; i < d; i++) {
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_CLAIM;
break;
}
}
/*
* If we don't have enough metaslabs active to fill the entire array, we
* just use the 0th slot.
*/
if (mg->mg_ms_ready < mg->mg_allocators * 3)
allocator = 0;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
/*
* At the end of the metaslab tree are the already-active metaslabs,
* first the primaries, then the secondaries. When we resume searching
* through the tree, we need to consider ms_allocator and ms_primary so
* we start in the location right after where we left off, and don't
* accidentally loop forever considering the same metaslabs.
*/
search->ms_allocator = -1;
search->ms_primary = B_TRUE;
for (;;) {
boolean_t was_active = B_FALSE;
mutex_enter(&mg->mg_lock);
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
mga->mga_primary != NULL) {
msp = mga->mga_primary;
/*
* Even though we don't hold the ms_lock for the
* primary metaslab, those fields should not
* change while we hold the mg_lock. Thus it is
* safe to make assertions on them.
*/
ASSERT(msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
mga->mga_secondary != NULL) {
msp = mga->mga_secondary;
/*
* See comment above about the similar assertions
* for the primary metaslab.
*/
ASSERT(!msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else {
msp = find_valid_metaslab(mg, activation_weight, dva, d,
want_unique, asize, allocator, try_hard, zal,
search, &was_active);
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
mutex_enter(&msp->ms_lock);
metaslab_active_mask_verify(msp);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE3(ms__activation__attempt,
metaslab_t *, msp, uint64_t, activation_weight,
boolean_t, was_active);
#endif
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
* active status first to see if we need to go back and
* select a new metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
ASSERT3S(msp->ms_allocator, ==, -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If the metaslab was activated for another allocator
* while we were waiting in the ms_lock above, or it's
* a primary and we're seeking a secondary (or vice versa),
* we go back and select a new metaslab.
*/
if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
(msp->ms_allocator != -1) &&
(msp->ms_allocator != allocator || ((activation_weight ==
METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
ASSERT(msp->ms_loaded);
ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
msp->ms_allocator != -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* This metaslab was used for claiming regions allocated
* by the ZIL during pool import. Once these regions are
* claimed we don't need to keep the CLAIM bit set
* anymore. Passivate this metaslab to zero its activation
* mask.
*/
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
activation_weight != METASLAB_WEIGHT_CLAIM) {
ASSERT(msp->ms_loaded);
ASSERT3S(msp->ms_allocator, ==, -1);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_WEIGHT_CLAIM);
mutex_exit(&msp->ms_lock);
continue;
}
metaslab_set_selected_txg(msp, txg);
int activation_error =
metaslab_activate(msp, allocator, activation_weight);
metaslab_active_mask_verify(msp);
/*
* If the metaslab was activated by another thread for
* another allocator or activation_weight (EBUSY), or it
* failed because another metaslab was assigned as primary
* for this allocator (EEXIST) we continue using this
* metaslab for our allocation, rather than going on to a
* worse metaslab (we waited for that metaslab to be loaded
* after all).
*
* If the activation failed due to an I/O error or ENOSPC we
* skip to the next metaslab.
*/
boolean_t activated;
if (activation_error == 0) {
activated = B_TRUE;
} else if (activation_error == EBUSY ||
activation_error == EEXIST) {
activated = B_FALSE;
} else {
mutex_exit(&msp->ms_lock);
continue;
}
ASSERT(msp->ms_loaded);
/*
* Now that we have the lock, recheck to see if we should
* continue to use this metaslab for this allocation. The
* metaslab is now loaded so metaslab_should_allocate()
* can accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize, try_hard)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
goto next;
}
/*
* If this metaslab is currently condensing then pick again
* as we can't manipulate this metaslab until it's committed
* to disk. If this metaslab is being initialized, we shouldn't
* allocate from it since the allocated region might be
* overwritten after allocation.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
} else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_DISABLED, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
if (activated)
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
uint64_t, asize);
#endif
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
uint64_t weight;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
weight = metaslab_largest_allocatable(msp);
WEIGHT_SET_SPACEBASED(weight);
} else {
weight = metaslab_weight_from_range_tree(msp);
}
if (activated) {
metaslab_passivate(msp, weight);
} else {
/*
* For the case where we use the metaslab that is
* active for another allocator we want to make
* sure that we retain the activation mask.
*
* Note that we could attempt to use something like
* metaslab_recalculate_weight_and_sort() that
* retains the activation mask here. That function
* uses metaslab_weight() to set the weight though
* which is not as accurate as the calculations
* above.
*/
weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(mg, msp, weight);
}
metaslab_active_mask_verify(msp);
/*
* We have just failed an allocation attempt, check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
uint64_t offset;
offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
dva, d, allocator, try_hard);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE, allocator);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* Allocate a block for the specified i/o.
*/
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal, int allocator)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
metaslab_group_t *mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
* This will result in more split blocks when using device removal,
* and a large number of split blocks coupled with ztest-induced
* damage can result in extremely long reconstruction times. This
* will also test spilling from special to normal.
*/
if (psize >= metaslab_force_ganging &&
metaslab_force_ganging_pct > 0 &&
(random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
allocator);
return (SET_ERROR(ENOSPC));
}
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mca_rotor or mca_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try to spread them out on
* that vdev as much as possible. If it turns out to not be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
* longer exists or its mg has been closed (e.g. by
* device removal). Consult the rotor when
* all else fails.
*/
if (vd != NULL && vd->vdev_mg != NULL) {
mg = vdev_get_mg(vd, mc);
if (flags & METASLAB_HINTBP_AVOID)
mg = mg->mg_next;
} else {
mg = mca->mca_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
} else {
ASSERT(mca->mca_rotor != NULL);
mg = mca->mca_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mca->mca_rotor;
rotor = mg;
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
flags, psize, allocator, d);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE, allocator);
goto next;
}
/*
* Avoid writing single-copy data to an unhealthy,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if (vd->vdev_state < VDEV_STATE_HEALTHY &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR, allocator);
goto next;
}
ASSERT(mg->mg_class == mc);
uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
/*
* If we don't need to try hard, then require that the
* block be on a different metaslab from any other DVAs
* in this BP (unique=true). If we are trying hard, then
* allow any metaslab to be used (unique=false).
*/
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
!try_hard, dva, d, allocator, try_hard);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
if ((flags & METASLAB_ZIL) ||
atomic_add_64_nv(&mca->mca_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
return (0);
}
next:
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, perhaps do so now.
*/
if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
psize <= 1 << spa->spa_min_ashift)) {
METASLABSTAT_BUMP(metaslabstat_try_hard);
try_hard = B_TRUE;
goto top;
}
memset(&dva[d], 0, sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
}
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
boolean_t checkpoint)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
ASSERT(vdev_is_concrete(vd));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
metaslab_check_free_impl(vd, offset, asize);
mutex_enter(&msp->ms_lock);
- if (range_tree_is_empty(msp->ms_freeing) &&
- range_tree_is_empty(msp->ms_checkpointing)) {
+ if (zfs_range_tree_is_empty(msp->ms_freeing) &&
+ zfs_range_tree_is_empty(msp->ms_checkpointing)) {
vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
}
if (checkpoint) {
ASSERT(spa_has_checkpoint(spa));
- range_tree_add(msp->ms_checkpointing, offset, asize);
+ zfs_range_tree_add(msp->ms_checkpointing, offset, asize);
} else {
- range_tree_add(msp->ms_freeing, offset, asize);
+ zfs_range_tree_add(msp->ms_freeing, offset, asize);
}
mutex_exit(&msp->ms_lock);
}
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
if (vd->vdev_ops->vdev_op_remap != NULL)
vdev_indirect_mark_obsolete(vd, offset, size);
else
metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
boolean_t checkpoint)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
return;
if (spa->spa_vdev_removal != NULL &&
spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
vdev_is_concrete(vd)) {
/*
* Note: we check if the vdev is concrete because when
* we complete the removal, we first change the vdev to be
* an indirect vdev (in open context), and then (in syncing
* context) clear spa_vdev_removal.
*/
free_from_removing_vdev(vd, offset, size);
} else if (vd->vdev_ops->vdev_op_remap != NULL) {
vdev_indirect_mark_obsolete(vd, offset, size);
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
} else {
metaslab_free_concrete(vd, offset, size, checkpoint);
}
}
typedef struct remap_blkptr_cb_arg {
blkptr_t *rbca_bp;
spa_remap_cb_t rbca_cb;
vdev_t *rbca_remap_vd;
uint64_t rbca_remap_offset;
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
remap_blkptr_cb_arg_t *rbca = arg;
blkptr_t *bp = rbca->rbca_bp;
/* We cannot remap split blocks. */
if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
return;
ASSERT0(inner_offset);
if (rbca->rbca_cb != NULL) {
/*
* At this point we know that we are not handling split
* blocks and we invoke the callback on the previous
* vdev which must be indirect.
*/
ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
/* set up remap_blkptr_cb_arg for the next call */
rbca->rbca_remap_vd = vd;
rbca->rbca_remap_offset = offset;
}
/*
* The phys birth time is that of dva[0]. This ensures that we know
* when each dva was written, so that resilver can determine which
* blocks need to be scrubbed (i.e. those written during the time
* the vdev was offline). It also ensures that the key used in
* the ARC hash table is unique (i.e. dva[0] + phys_birth). If
* we didn't change the phys_birth, a lookup in the ARC for a
* remapped BP could find the data that was previously stored at
* this vdev + offset.
*/
vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
uint64_t physical_birth = vdev_indirect_births_physbirth(vib,
DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
BP_SET_PHYSICAL_BIRTH(bp, physical_birth);
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
* If the block pointer contains any indirect DVAs, modify them to refer to
* concrete DVAs. Note that this will sometimes not be possible, leaving
* the indirect DVA in place. This happens if the indirect DVA spans multiple
* segments in the mapping (i.e. it is a "split block").
*
* If the BP was remapped, calls the callback on the original dva (note the
* callback can be called multiple times if the original indirect DVA refers
* to another indirect DVA, etc).
*
* Returns TRUE if the BP was remapped.
*/
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
remap_blkptr_cb_arg_t rbca;
if (!zfs_remap_blkptr_enable)
return (B_FALSE);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
return (B_FALSE);
/*
* Dedup BP's can not be remapped, because ddt_phys_select() depends
* on DVA[0] being the same in the BP as in the DDT (dedup table).
*/
if (BP_GET_DEDUP(bp))
return (B_FALSE);
/*
* Gang blocks can not be remapped, because
* zio_checksum_gang_verifier() depends on the DVA[0] that's in
* the BP used to read the gang block header (GBH) being the same
* as the DVA[0] that we allocated for the GBH.
*/
if (BP_IS_GANG(bp))
return (B_FALSE);
/*
* Embedded BP's have no DVA to remap.
*/
if (BP_GET_NDVAS(bp) < 1)
return (B_FALSE);
/*
* Note: we only remap dva[0]. If we remapped other dvas, we
* would no longer know what their phys birth txg is.
*/
dva_t *dva = &bp->blk_dva[0];
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops->vdev_op_remap == NULL)
return (B_FALSE);
rbca.rbca_bp = bp;
rbca.rbca_cb = callback;
rbca.rbca_remap_vd = vd;
rbca.rbca_remap_offset = offset;
rbca.rbca_cb_arg = arg;
/*
* remap_blkptr_cb() will be called in order for each level of
* indirection, until a concrete vdev is reached or a split block is
* encountered. old_vd and old_offset are updated within the callback
* as we go from the one indirect vdev to the next one (either concrete
* or indirect again) in that order.
*/
vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
/* Check if the DVA wasn't remapped because it is a split block */
if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
return (B_FALSE);
return (B_TRUE);
}
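/*
 * Illustrative sketch (hypothetical callback; its prototype is inferred from
 * the rbca_cb invocation above): remap dva[0] of a BP and log every indirect
 * <vdev, offset> it passed through on the way to concrete space.
 */
#if 0
static void
remap_note_cb(uint64_t vdev, uint64_t offset, uint64_t size, void *arg)
{
	(void) arg;
	zfs_dbgmsg("remapped through vdev %llu offset %llu size %llu",
	    (u_longlong_t)vdev, (u_longlong_t)offset, (u_longlong_t)size);
}

if (spa_remap_blkptr(spa, bp, remap_note_cb, NULL)) {
	/* bp->blk_dva[0] now refers to a concrete vdev */
}
#endif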
/*
* Undo the allocation of a DVA which happened in the given transaction group.
*/
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
metaslab_t *msp;
vdev_t *vd;
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
- range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
+ zfs_range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total -= size;
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
- VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
+ VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) + size, <=,
msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
- range_tree_add(msp->ms_allocatable, offset, size);
+ zfs_range_tree_add(msp->ms_allocatable, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
* Free the block represented by the given DVA.
*/
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (DVA_GET_GANG(dva)) {
size = vdev_gang_header_asize(vd);
}
metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
zio_t *zio, int flags)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
uint64_t max = mca->mca_alloc_max_slots;
ASSERT(mc->mc_alloc_throttle_enabled);
if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
/*
* The potential race between _count() and _add() is covered
* by the allocator lock in most cases, or irrelevant due to
* GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
* But even if we assume some other non-existing scenario, the
* worst that can happen is few more I/Os get to allocation
* earlier, that is not a problem.
*
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
return (B_TRUE);
}
return (B_FALSE);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
int allocator, zio_t *zio)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
ASSERT(mc->mc_alloc_throttle_enabled);
zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
}
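/*
 * Illustrative sketch (hypothetical caller; mc, slots, allocator and zio are
 * assumed, e.g. one slot per DVA being written): reserve before calling into
 * the allocator and release the same number of slots when the I/O completes.
 */
#if 0
if (metaslab_class_throttle_reserve(mc, slots, allocator, zio, 0)) {
	/* reserved: issue the allocation for this zio ... */
	/* ... and when the I/O completes, release the same slots: */
	metaslab_class_throttle_unreserve(mc, slots, allocator, zio);
} else {
	/* no slots available: the zio is throttled until others complete */
}
#endif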
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
uint64_t txg)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
int error = 0;
if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
return (SET_ERROR(ENXIO));
ASSERT3P(vd->vdev_ms, !=, NULL);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
if (error == EBUSY) {
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
error = 0;
}
}
if (error == 0 &&
- !range_tree_contains(msp->ms_allocatable, offset, size))
+ !zfs_range_tree_contains(msp->ms_allocatable, offset, size))
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
return (error);
}
VERIFY(!msp->ms_condensing);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
- VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
+ VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
- range_tree_remove(msp->ms_allocatable, offset, size);
- range_tree_clear(msp->ms_trim, offset, size);
+ zfs_range_tree_remove(msp->ms_allocatable, offset, size);
+ zfs_range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (!multilist_link_active(&msp->ms_class_txg_node)) {
msp->ms_selected_txg = txg;
multilist_sublist_insert_head(mls, msp);
}
multilist_sublist_unlock(mls);
- if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
+ if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(vd, VDD_METASLAB, msp, txg);
- range_tree_add(msp->ms_allocating[txg & TXG_MASK],
+ zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total += size;
}
mutex_exit(&msp->ms_lock);
return (0);
}
typedef struct metaslab_claim_cb_arg_t {
uint64_t mcca_txg;
int mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
size, mcca_arg->mcca_txg);
}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
if (vd->vdev_ops->vdev_op_remap != NULL) {
metaslab_claim_cb_arg_t arg;
/*
* Only zdb(8) can claim on indirect vdevs. This is used
* to detect leaks of mapped space (that are not accounted
* for in the obsolete counts, spacemap, or bpobj).
*/
ASSERT(!spa_writeable(vd->vdev_spa));
arg.mcca_error = 0;
arg.mcca_txg = txg;
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_claim_impl_cb, &arg);
if (arg.mcca_error == 0) {
arg.mcca_error = metaslab_claim_concrete(vd,
offset, size, txg);
}
return (arg.mcca_error);
} else {
return (metaslab_claim_concrete(vd, offset, size, txg));
}
}
/*
* Intent log support: upon opening the pool after a crash, notify the SPA
* of blocks that the intent log has allocated for immediate write, but
* which are still considered free by the SPA because the last transaction
* group didn't commit yet.
*/
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
return (SET_ERROR(ENXIO));
}
ASSERT(DVA_IS_VALID(dva));
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
int error = 0;
ASSERT0(BP_GET_LOGICAL_BIRTH(bp));
ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_allocator[allocator].mca_rotor == NULL) {
/* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal, allocator);
if (error != 0) {
for (d--; d >= 0; d--) {
metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
memset(&dva[d], 0, sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || BP_GET_LOGICAL_BIRTH(bp) >= spa_syncing_txg(spa));
/*
* If we have a checkpoint for the pool we need to make sure that
* the blocks that we free that are part of the checkpoint won't be
* reused until the checkpoint is discarded or we revert to it.
*
* The checkpoint flag is passed down the metaslab_free code path
* and is set whenever we want to add a block to the checkpoint's
* accounting. That is, we "checkpoint" blocks that existed at the
* time the checkpoint was created and are therefore referenced by
* the checkpointed uberblock.
*
* Note that we don't checkpoint any blocks if the current
* syncing txg <= spa_checkpoint_txg. We want these frees to sync
* normally as they will be referenced by the checkpointed uberblock.
*/
boolean_t checkpoint = B_FALSE;
if (BP_GET_LOGICAL_BIRTH(bp) <= spa->spa_checkpoint_txg &&
spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
/*
* At this point, if the block is part of the checkpoint
* there is no way it was created in the current txg.
*/
ASSERT(!now);
ASSERT3U(spa_syncing_txg(spa), ==, txg);
checkpoint = B_TRUE;
}
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
if (now) {
metaslab_unalloc_dva(spa, &dva[d], txg);
} else {
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_free_dva(spa, &dva[d], checkpoint);
}
}
spa_config_exit(spa, SCL_FREE, FTAG);
}
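/*
 * Illustrative sketch (assumed context): the common case frees a block in the
 * syncing txg through the deferred/checkpoint-aware path (now = B_FALSE);
 * now = B_TRUE returns the space immediately and, per the ASSERT above, is
 * only valid for blocks born in the current syncing txg or later.
 */
#if 0
metaslab_free(spa, bp, spa_syncing_txg(spa), B_FALSE);
#endif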
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
error = metaslab_claim_dva(spa, &dva[d], txg);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner, (void) arg;
if (vd->vdev_ops == &vdev_indirect_ops)
return;
metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
metaslab_t *msp;
spa_t *spa __maybe_unused = vd->vdev_spa;
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
if (vd->vdev_ops->vdev_op_remap != NULL) {
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_check_free_impl_cb, NULL);
return;
}
ASSERT(vdev_is_concrete(vd));
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if (msp->ms_loaded) {
- range_tree_verify_not_present(msp->ms_allocatable,
+ zfs_range_tree_verify_not_present(msp->ms_allocatable,
offset, size);
}
/*
* Check all segments that currently exist in the freeing pipeline.
*
* It would intuitively make sense to also check the current allocating
* tree since metaslab_unalloc_dva() exists for extents that are
* allocated and freed in the same sync pass within the same txg.
* Unfortunately there are places (e.g. the ZIL) where we allocate a
* segment but then we free part of it within the same txg
- * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
+ * [see zil_sync()]. Thus, we don't call zfs_range_tree_verify() in the
* current allocating tree.
*/
- range_tree_verify_not_present(msp->ms_freeing, offset, size);
- range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
- range_tree_verify_not_present(msp->ms_freed, offset, size);
+ zfs_range_tree_verify_not_present(msp->ms_freeing, offset, size);
+ zfs_range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
+ zfs_range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
- range_tree_verify_not_present(msp->ms_defer[j], offset, size);
- range_tree_verify_not_present(msp->ms_trim, offset, size);
+ zfs_range_tree_verify_not_present(msp->ms_defer[j], offset,
+ size);
+ zfs_range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (DVA_GET_GANG(&bp->blk_dva[i]))
size = vdev_gang_header_asize(vd);
ASSERT3P(vd, !=, NULL);
metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
while (mg->mg_disabled_updating) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
ASSERT(mg->mg_disabled_updating);
while (mg->mg_ms_disabled >= max_disabled_ms) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
mg->mg_ms_disabled++;
ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
* Mark the metaslab as disabled to prevent any allocations on this metaslab.
* We must also track how many metaslabs are currently disabled within a
* metaslab group and limit them to prevent allocation failures from
* occurring because all metaslabs are disabled.
*/
void
metaslab_disable(metaslab_t *msp)
{
ASSERT(!MUTEX_HELD(&msp->ms_lock));
metaslab_group_t *mg = msp->ms_group;
mutex_enter(&mg->mg_ms_disabled_lock);
/*
* To keep an accurate count of how many threads have disabled
* a specific metaslab group, we only allow one thread to mark
* the metaslab group at a time. This ensures that the value of
* ms_disabled will be accurate when we decide to mark a metaslab
* group as disabled. To do this we force all other threads
* to wait till the metaslab's mg_disabled_updating flag is no
* longer set.
*/
metaslab_group_disable_wait(mg);
mg->mg_disabled_updating = B_TRUE;
if (msp->ms_disabled == 0) {
metaslab_group_disabled_increment(mg);
}
mutex_enter(&msp->ms_lock);
msp->ms_disabled++;
mutex_exit(&msp->ms_lock);
mg->mg_disabled_updating = B_FALSE;
cv_broadcast(&mg->mg_ms_disabled_cv);
mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
/*
* Wait for the outstanding IO to be synced to prevent newly
* allocated blocks from being overwritten. This is used by
* initialize and TRIM, which are modifying unallocated space.
*/
if (sync)
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&mg->mg_ms_disabled_lock);
mutex_enter(&msp->ms_lock);
if (--msp->ms_disabled == 0) {
mg->mg_ms_disabled--;
cv_broadcast(&mg->mg_ms_disabled_cv);
if (unload)
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&mg->mg_ms_disabled_lock);
}
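/*
 * Illustrative sketch (assumed context, e.g. TRIM or initialize): bracket
 * work on unallocated space with disable/enable so the allocator stays away
 * from this metaslab; sync = B_TRUE waits for outstanding I/O to be synced
 * before re-enabling, and unload = B_TRUE drops the in-core range trees.
 */
#if 0
metaslab_disable(msp);
/* ... operate on space that is currently unallocated in msp ... */
metaslab_enable(msp, B_TRUE, B_FALSE);
#endif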
void
metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
{
ms->ms_unflushed_dirty = dirty;
}
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
vdev_t *vd = ms->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
metaslab_unflushed_phys_t entry = {
.msp_unflushed_txg = metaslab_unflushed_txg(ms),
};
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object);
if (err == ENOENT) {
object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object, tx));
} else {
VERIFY0(err);
}
dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
&entry, tx);
}
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
ms->ms_unflushed_txg = txg;
metaslab_update_ondisk_flush_data(ms, tx);
}
boolean_t
metaslab_unflushed_dirty(metaslab_t *ms)
{
return (ms->ms_unflushed_dirty);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
"Allocation granularity (a.k.a. stripe size)");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
"Load all metaslabs when pool is first opened");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
"Prevent metaslabs from being unloaded");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
"Preload potential metaslabs during reassessment");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW,
"Max number of metaslabs per group to preload");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
"Delay in txgs after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
"Delay in milliseconds after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be free to make it "
"eligible for allocation");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be considered eligible "
"for allocations unless all metaslab groups within the metaslab class "
"have also crossed this threshold");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
ZMOD_RW,
"Use the fragmentation metric to prefer less fragmented metaslabs");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
"Prefer metaslabs with lower LBAs");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
"Enable metaslab group biasing");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
ZMOD_RW, "Enable segment-based metaslab selection");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
"Segment-based metaslab selection maximum buckets before switching");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
"Blocks larger than this size are sometimes forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
"Percentage of large blocks that will be forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
"Max distance (bytes) to search forward before using size tree");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
"When looking in size tree, use largest segment instead of exact fit");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
"Percentage of memory that can be used to store metaslab range trees");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
ZMOD_RW, "Try hard to allocate before ganging");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
"Normally only consider this many of the best metaslabs in each vdev");
ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator,
param_set_active_allocator, param_get_charp, ZMOD_RW,
"SPA active allocator");
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index 5174e2c46633..8bb9a0724e61 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -1,867 +1,875 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2019 by Delphix. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>
/*
* Range trees are tree-based data structures that can be used to
* track free space or generally any space allocation information.
* A range tree keeps track of individual segments and automatically
* provides facilities such as adjacent extent merging and extent
* splitting in response to range add/remove requests.
*
* A range tree starts out completely empty, with no segments in it.
- * Adding an allocation via range_tree_add to the range tree can either:
+ * Adding an allocation via zfs_range_tree_add to the range tree can either:
* 1) create a new extent
* 2) extend an adjacent extent
* 3) merge two adjacent extents
- * Conversely, removing an allocation via range_tree_remove can:
+ * Conversely, removing an allocation via zfs_range_tree_remove can:
* 1) completely remove an extent
* 2) shorten an extent (if the allocation was near one of its ends)
* 3) split an extent into two extents, in effect punching a hole
*
* A range tree is also capable of 'bridging' gaps when adding
* allocations. This is useful for cases when close proximity of
* allocations is an important detail that needs to be represented
- * in the range tree. See range_tree_set_gap(). The default behavior
+ * in the range tree. See zfs_range_tree_set_gap(). The default behavior
* is not to bridge gaps (i.e. the maximum allowed gap size is 0).
*
- * In order to traverse a range tree, use either the range_tree_walk()
- * or range_tree_vacate() functions.
+ * In order to traverse a range tree, use either the zfs_range_tree_walk()
+ * or zfs_range_tree_vacate() functions.
*
* To obtain more accurate information on individual segment
* operations that the range tree performs "under the hood", you can
- * specify a set of callbacks by passing a range_tree_ops_t structure
- * to the range_tree_create function. Any callbacks that are non-NULL
+ * specify a set of callbacks by passing a zfs_range_tree_ops_t structure
+ * to the zfs_range_tree_create function. Any callbacks that are non-NULL
* are then called at the appropriate times.
*
* The range tree code also supports a special variant of range trees
* that can bridge small gaps between segments. This kind of tree is used
* by the dsl scanning code to group I/Os into mostly sequential chunks to
* optimize disk performance. The code here attempts to do this with as
* little memory and computational overhead as possible. One limitation of
* this implementation is that segments of range trees with gaps can only
* support removing complete segments.
*/
static inline void
-rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
+zfs_rs_copy(zfs_range_seg_t *src, zfs_range_seg_t *dest, zfs_range_tree_t *rt)
{
- ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <, ZFS_RANGE_SEG_NUM_TYPES);
size_t size = 0;
switch (rt->rt_type) {
- case RANGE_SEG32:
- size = sizeof (range_seg32_t);
+ case ZFS_RANGE_SEG32:
+ size = sizeof (zfs_range_seg32_t);
break;
- case RANGE_SEG64:
- size = sizeof (range_seg64_t);
+ case ZFS_RANGE_SEG64:
+ size = sizeof (zfs_range_seg64_t);
break;
- case RANGE_SEG_GAP:
- size = sizeof (range_seg_gap_t);
+ case ZFS_RANGE_SEG_GAP:
+ size = sizeof (zfs_range_seg_gap_t);
break;
default:
__builtin_unreachable();
}
memcpy(dest, src, size);
}
void
-range_tree_stat_verify(range_tree_t *rt)
+zfs_range_tree_stat_verify(zfs_range_tree_t *rt)
{
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
zfs_btree_index_t where;
- uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
+ uint64_t hist[ZFS_RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
int i;
for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
- uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
+ uint64_t size = zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
hist[idx]++;
ASSERT3U(hist[idx], !=, 0);
}
- for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (hist[i] != rt->rt_histogram[i]) {
zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
i, hist, (u_longlong_t)hist[i],
(u_longlong_t)rt->rt_histogram[i]);
}
VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
}
}
static void
-range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
+zfs_range_tree_stat_incr(zfs_range_tree_t *rt, zfs_range_seg_t *rs)
{
- uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
+ uint64_t size = zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
rt->rt_histogram[idx]++;
ASSERT3U(rt->rt_histogram[idx], !=, 0);
}
static void
-range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
+zfs_range_tree_stat_decr(zfs_range_tree_t *rt, zfs_range_seg_t *rs)
{
- uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
+ uint64_t size = zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
ASSERT3U(rt->rt_histogram[idx], !=, 0);
rt->rt_histogram[idx]--;
}
__attribute__((always_inline)) inline
static int
-range_tree_seg32_compare(const void *x1, const void *x2)
+zfs_range_tree_seg32_compare(const void *x1, const void *x2)
{
- const range_seg32_t *r1 = x1;
- const range_seg32_t *r2 = x2;
+ const zfs_range_seg32_t *r1 = x1;
+ const zfs_range_seg32_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
__attribute__((always_inline)) inline
static int
-range_tree_seg64_compare(const void *x1, const void *x2)
+zfs_range_tree_seg64_compare(const void *x1, const void *x2)
{
- const range_seg64_t *r1 = x1;
- const range_seg64_t *r2 = x2;
+ const zfs_range_seg64_t *r1 = x1;
+ const zfs_range_seg64_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
__attribute__((always_inline)) inline
static int
-range_tree_seg_gap_compare(const void *x1, const void *x2)
+zfs_range_tree_seg_gap_compare(const void *x1, const void *x2)
{
- const range_seg_gap_t *r1 = x1;
- const range_seg_gap_t *r2 = x2;
+ const zfs_range_seg_gap_t *r1 = x1;
+ const zfs_range_seg_gap_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
-ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg32_find_in_buf, range_seg32_t,
- range_tree_seg32_compare)
+ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg32_find_in_buf, zfs_range_seg32_t,
+ zfs_range_tree_seg32_compare)
-ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg64_find_in_buf, range_seg64_t,
- range_tree_seg64_compare)
+ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg64_find_in_buf, zfs_range_seg64_t,
+ zfs_range_tree_seg64_compare)
-ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg_gap_find_in_buf, range_seg_gap_t,
- range_tree_seg_gap_compare)
+ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg_gap_find_in_buf,
+ zfs_range_seg_gap_t, zfs_range_tree_seg_gap_compare)
-range_tree_t *
-range_tree_create_gap(const range_tree_ops_t *ops, range_seg_type_t type,
- void *arg, uint64_t start, uint64_t shift, uint64_t gap)
+zfs_range_tree_t *
+zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
+ zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
+ uint64_t gap)
{
- range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);
+ zfs_range_tree_t *rt = kmem_zalloc(sizeof (zfs_range_tree_t), KM_SLEEP);
ASSERT3U(shift, <, 64);
- ASSERT3U(type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(type, <=, ZFS_RANGE_SEG_NUM_TYPES);
size_t size;
int (*compare) (const void *, const void *);
bt_find_in_buf_f bt_find;
switch (type) {
- case RANGE_SEG32:
- size = sizeof (range_seg32_t);
- compare = range_tree_seg32_compare;
- bt_find = range_tree_seg32_find_in_buf;
+ case ZFS_RANGE_SEG32:
+ size = sizeof (zfs_range_seg32_t);
+ compare = zfs_range_tree_seg32_compare;
+ bt_find = zfs_range_tree_seg32_find_in_buf;
break;
- case RANGE_SEG64:
- size = sizeof (range_seg64_t);
- compare = range_tree_seg64_compare;
- bt_find = range_tree_seg64_find_in_buf;
+ case ZFS_RANGE_SEG64:
+ size = sizeof (zfs_range_seg64_t);
+ compare = zfs_range_tree_seg64_compare;
+ bt_find = zfs_range_tree_seg64_find_in_buf;
break;
- case RANGE_SEG_GAP:
- size = sizeof (range_seg_gap_t);
- compare = range_tree_seg_gap_compare;
- bt_find = range_tree_seg_gap_find_in_buf;
+ case ZFS_RANGE_SEG_GAP:
+ size = sizeof (zfs_range_seg_gap_t);
+ compare = zfs_range_tree_seg_gap_compare;
+ bt_find = zfs_range_tree_seg_gap_find_in_buf;
break;
default:
panic("Invalid range seg type %d", type);
}
zfs_btree_create(&rt->rt_root, compare, bt_find, size);
rt->rt_ops = ops;
rt->rt_gap = gap;
rt->rt_arg = arg;
rt->rt_type = type;
rt->rt_start = start;
rt->rt_shift = shift;
if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
rt->rt_ops->rtop_create(rt, rt->rt_arg);
return (rt);
}
-range_tree_t *
-range_tree_create(const range_tree_ops_t *ops, range_seg_type_t type,
- void *arg, uint64_t start, uint64_t shift)
+zfs_range_tree_t *
+zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
+ zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift)
{
- return (range_tree_create_gap(ops, type, arg, start, shift, 0));
+ return (zfs_range_tree_create_gap(ops, type, arg, start, shift, 0));
}
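/*
 * Illustrative sketch (not part of this change; the _sketch helper below is
 * hypothetical): a minimal caller that tracks plain 64-bit offsets with no
 * ops vector, no start bias, no shift and no gap merging.  The tree must be
 * emptied before it is destroyed.
 */
static void __maybe_unused
zfs_range_tree_usage_sketch(void)
{
	zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
	    NULL, 0, 0);

	zfs_range_tree_add(rt, 0x1000, 0x2000);		/* [0x1000, 0x3000) */
	VERIFY(zfs_range_tree_contains(rt, 0x1800, 0x100));
	VERIFY3U(zfs_range_tree_space(rt), ==, 0x2000);
	zfs_range_tree_vacate(rt, NULL, NULL);		/* drop all segments */
	zfs_range_tree_destroy(rt);
}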
void
-range_tree_destroy(range_tree_t *rt)
+zfs_range_tree_destroy(zfs_range_tree_t *rt)
{
VERIFY0(rt->rt_space);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
rt->rt_ops->rtop_destroy(rt, rt->rt_arg);
zfs_btree_destroy(&rt->rt_root);
kmem_free(rt, sizeof (*rt));
}
void
-range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
+zfs_range_tree_adjust_fill(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
+ int64_t delta)
{
- if (delta < 0 && delta * -1 >= rs_get_fill(rs, rt)) {
+ if (delta < 0 && delta * -1 >= zfs_rs_get_fill(rs, rt)) {
zfs_panic_recover("zfs: attempting to decrease fill to or "
"below 0; probable double remove in segment [%llx:%llx]",
- (longlong_t)rs_get_start(rs, rt),
- (longlong_t)rs_get_end(rs, rt));
+ (longlong_t)zfs_rs_get_start(rs, rt),
+ (longlong_t)zfs_rs_get_end(rs, rt));
}
- if (rs_get_fill(rs, rt) + delta > rs_get_end(rs, rt) -
- rs_get_start(rs, rt)) {
+ if (zfs_rs_get_fill(rs, rt) + delta > zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt)) {
zfs_panic_recover("zfs: attempting to increase fill beyond "
"max; probable double add in segment [%llx:%llx]",
- (longlong_t)rs_get_start(rs, rt),
- (longlong_t)rs_get_end(rs, rt));
+ (longlong_t)zfs_rs_get_start(rs, rt),
+ (longlong_t)zfs_rs_get_end(rs, rt));
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
- rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta);
+ zfs_rs_set_fill(rs, rt, zfs_rs_get_fill(rs, rt) + delta);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}
static void
-range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
+zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
- range_tree_t *rt = arg;
+ zfs_range_tree_t *rt = arg;
zfs_btree_index_t where;
- range_seg_t *rs_before, *rs_after, *rs;
- range_seg_max_t tmp, rsearch;
+ zfs_range_seg_t *rs_before, *rs_after, *rs;
+ zfs_range_seg_max_t tmp, rsearch;
uint64_t end = start + size, gap = rt->rt_gap;
uint64_t bridge_size = 0;
boolean_t merge_before, merge_after;
ASSERT3U(size, !=, 0);
ASSERT3U(fill, <=, size);
ASSERT3U(start + size, >, start);
- rs_set_start(&rsearch, rt, start);
- rs_set_end(&rsearch, rt, end);
+ zfs_rs_set_start(&rsearch, rt, start);
+ zfs_rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/*
* If this is a gap-supporting range tree, it is possible that we
* are inserting into an existing segment. In this case simply
* bump the fill count and call the remove / add callbacks. If the
* new range will extend an existing segment, we remove the
* existing one, apply the new extent to it and re-insert it using
* the normal code paths.
*/
if (rs != NULL) {
if (gap == 0) {
zfs_panic_recover("zfs: adding existent segment to "
"range tree (offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size);
return;
}
- uint64_t rstart = rs_get_start(rs, rt);
- uint64_t rend = rs_get_end(rs, rt);
+ uint64_t rstart = zfs_rs_get_start(rs, rt);
+ uint64_t rend = zfs_rs_get_end(rs, rt);
if (rstart <= start && rend >= end) {
- range_tree_adjust_fill(rt, rs, fill);
+ zfs_range_tree_adjust_fill(rt, rs, fill);
return;
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
- range_tree_stat_decr(rt, rs);
+ zfs_range_tree_stat_decr(rt, rs);
rt->rt_space -= rend - rstart;
- fill += rs_get_fill(rs, rt);
+ fill += zfs_rs_get_fill(rs, rt);
start = MIN(start, rstart);
end = MAX(end, rend);
size = end - start;
zfs_btree_remove(&rt->rt_root, rs);
- range_tree_add_impl(rt, start, size, fill);
+ zfs_range_tree_add_impl(rt, start, size, fill);
return;
}
ASSERT3P(rs, ==, NULL);
/*
* Determine whether or not we will have to merge with our neighbors.
* If gap != 0, we might need to merge with our neighbors even if we
* aren't directly touching.
*/
zfs_btree_index_t where_before, where_after;
rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);
- merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >=
+ merge_before = (rs_before != NULL && zfs_rs_get_end(rs_before, rt) >=
start - gap);
- merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end +
- gap);
+ merge_after = (rs_after != NULL && zfs_rs_get_start(rs_after, rt) <=
+ end + gap);
if (merge_before && gap != 0)
- bridge_size += start - rs_get_end(rs_before, rt);
+ bridge_size += start - zfs_rs_get_end(rs_before, rt);
if (merge_after && gap != 0)
- bridge_size += rs_get_start(rs_after, rt) - end;
+ bridge_size += zfs_rs_get_start(rs_after, rt) - end;
if (merge_before && merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
}
- range_tree_stat_decr(rt, rs_before);
- range_tree_stat_decr(rt, rs_after);
+ zfs_range_tree_stat_decr(rt, rs_before);
+ zfs_range_tree_stat_decr(rt, rs_after);
- rs_copy(rs_after, &tmp, rt);
- uint64_t before_start = rs_get_start_raw(rs_before, rt);
- uint64_t before_fill = rs_get_fill(rs_before, rt);
- uint64_t after_fill = rs_get_fill(rs_after, rt);
+ zfs_rs_copy(rs_after, &tmp, rt);
+ uint64_t before_start = zfs_rs_get_start_raw(rs_before, rt);
+ uint64_t before_fill = zfs_rs_get_fill(rs_before, rt);
+ uint64_t after_fill = zfs_rs_get_fill(rs_after, rt);
zfs_btree_remove_idx(&rt->rt_root, &where_before);
/*
* We have to re-find the node because our old reference is
* invalid as soon as we do any mutating btree operations.
*/
rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
ASSERT3P(rs_after, !=, NULL);
- rs_set_start_raw(rs_after, rt, before_start);
- rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
+ zfs_rs_set_start_raw(rs_after, rt, before_start);
+ zfs_rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
rs = rs_after;
} else if (merge_before) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
- range_tree_stat_decr(rt, rs_before);
+ zfs_range_tree_stat_decr(rt, rs_before);
- uint64_t before_fill = rs_get_fill(rs_before, rt);
- rs_set_end(rs_before, rt, end);
- rs_set_fill(rs_before, rt, before_fill + fill);
+ uint64_t before_fill = zfs_rs_get_fill(rs_before, rt);
+ zfs_rs_set_end(rs_before, rt, end);
+ zfs_rs_set_fill(rs_before, rt, before_fill + fill);
rs = rs_before;
} else if (merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
- range_tree_stat_decr(rt, rs_after);
+ zfs_range_tree_stat_decr(rt, rs_after);
- uint64_t after_fill = rs_get_fill(rs_after, rt);
- rs_set_start(rs_after, rt, start);
- rs_set_fill(rs_after, rt, after_fill + fill);
+ uint64_t after_fill = zfs_rs_get_fill(rs_after, rt);
+ zfs_rs_set_start(rs_after, rt, start);
+ zfs_rs_set_fill(rs_after, rt, after_fill + fill);
rs = rs_after;
} else {
rs = &tmp;
- rs_set_start(rs, rt, start);
- rs_set_end(rs, rt, end);
- rs_set_fill(rs, rt, fill);
+ zfs_rs_set_start(rs, rt, start);
+ zfs_rs_set_end(rs, rt, end);
+ zfs_rs_set_fill(rs, rt, fill);
zfs_btree_add_idx(&rt->rt_root, rs, &where);
}
if (gap != 0) {
- ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) -
- rs_get_start(rs, rt));
+ ASSERT3U(zfs_rs_get_fill(rs, rt), <=, zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt));
} else {
- ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) -
- rs_get_start(rs, rt));
+ ASSERT3U(zfs_rs_get_fill(rs, rt), ==, zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt));
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
- range_tree_stat_incr(rt, rs);
+ zfs_range_tree_stat_incr(rt, rs);
rt->rt_space += size + bridge_size;
}
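/*
 * Illustrative sketch (not part of this change; the _sketch helper below is
 * hypothetical): with a non-zero gap, two additions that land within 'gap'
 * bytes of each other are bridged into one segment.  rt_space includes the
 * bridged bytes, while the segment's fill only counts what was actually
 * added.
 */
static void __maybe_unused
zfs_range_tree_gap_merge_sketch(void)
{
	zfs_range_tree_t *rt = zfs_range_tree_create_gap(NULL,
	    ZFS_RANGE_SEG_GAP, NULL, 0, 0, 512);

	zfs_range_tree_add(rt, 0, 256);		/* [0, 256) */
	zfs_range_tree_add(rt, 512, 256);	/* bridges [256, 512) */
	VERIFY3U(zfs_range_tree_numsegs(rt), ==, 1);
	VERIFY3U(zfs_range_tree_space(rt), ==, 768);
	zfs_range_tree_vacate(rt, NULL, NULL);
	zfs_range_tree_destroy(rt);
}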
void
-range_tree_add(void *arg, uint64_t start, uint64_t size)
+zfs_range_tree_add(void *arg, uint64_t start, uint64_t size)
{
- range_tree_add_impl(arg, start, size, size);
+ zfs_range_tree_add_impl(arg, start, size, size);
}
static void
-range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
+zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
boolean_t do_fill)
{
zfs_btree_index_t where;
- range_seg_t *rs;
- range_seg_max_t rsearch, rs_tmp;
+ zfs_range_seg_t *rs;
+ zfs_range_seg_max_t rsearch, rs_tmp;
uint64_t end = start + size;
boolean_t left_over, right_over;
VERIFY3U(size, !=, 0);
VERIFY3U(size, <=, rt->rt_space);
- if (rt->rt_type == RANGE_SEG64)
+ if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start);
- rs_set_start(&rsearch, rt, start);
- rs_set_end(&rsearch, rt, end);
+ zfs_rs_set_start(&rsearch, rt, start);
+ zfs_rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/* Make sure we completely overlap with someone */
if (rs == NULL) {
zfs_panic_recover("zfs: removing nonexistent segment from "
"range tree (offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size);
return;
}
/*
* Range trees with gap support must only remove complete segments
* from the tree. This allows us to maintain accurate fill accounting
* and to ensure that bridged sections are not leaked. If we need to
* remove less than the full segment, we can only adjust the fill count.
*/
if (rt->rt_gap != 0) {
if (do_fill) {
- if (rs_get_fill(rs, rt) == size) {
- start = rs_get_start(rs, rt);
- end = rs_get_end(rs, rt);
+ if (zfs_rs_get_fill(rs, rt) == size) {
+ start = zfs_rs_get_start(rs, rt);
+ end = zfs_rs_get_end(rs, rt);
size = end - start;
} else {
- range_tree_adjust_fill(rt, rs, -size);
+ zfs_range_tree_adjust_fill(rt, rs, -size);
return;
}
- } else if (rs_get_start(rs, rt) != start ||
- rs_get_end(rs, rt) != end) {
+ } else if (zfs_rs_get_start(rs, rt) != start ||
+ zfs_rs_get_end(rs, rt) != end) {
zfs_panic_recover("zfs: freeing partial segment of "
"gap tree (offset=%llx size=%llx) of "
"(offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size,
- (longlong_t)rs_get_start(rs, rt),
- (longlong_t)rs_get_end(rs, rt) - rs_get_start(rs,
- rt));
+ (longlong_t)zfs_rs_get_start(rs, rt),
+ (longlong_t)zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt));
return;
}
}
- VERIFY3U(rs_get_start(rs, rt), <=, start);
- VERIFY3U(rs_get_end(rs, rt), >=, end);
+ VERIFY3U(zfs_rs_get_start(rs, rt), <=, start);
+ VERIFY3U(zfs_rs_get_end(rs, rt), >=, end);
- left_over = (rs_get_start(rs, rt) != start);
- right_over = (rs_get_end(rs, rt) != end);
+ left_over = (zfs_rs_get_start(rs, rt) != start);
+ right_over = (zfs_rs_get_end(rs, rt) != end);
- range_tree_stat_decr(rt, rs);
+ zfs_range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
if (left_over && right_over) {
- range_seg_max_t newseg;
- rs_set_start(&newseg, rt, end);
- rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt));
- rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end);
- range_tree_stat_incr(rt, &newseg);
+ zfs_range_seg_max_t newseg;
+ zfs_rs_set_start(&newseg, rt, end);
+ zfs_rs_set_end_raw(&newseg, rt, zfs_rs_get_end_raw(rs, rt));
+ zfs_rs_set_fill(&newseg, rt, zfs_rs_get_end(rs, rt) - end);
+ zfs_range_tree_stat_incr(rt, &newseg);
// This modifies the buffer already inside the range tree
- rs_set_end(rs, rt, start);
+ zfs_rs_set_end(rs, rt, start);
- rs_copy(rs, &rs_tmp, rt);
+ zfs_rs_copy(rs, &rs_tmp, rt);
if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
else
zfs_btree_add(&rt->rt_root, &newseg);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
} else if (left_over) {
// This modifies the buffer already inside the range tree
- rs_set_end(rs, rt, start);
- rs_copy(rs, &rs_tmp, rt);
+ zfs_rs_set_end(rs, rt, start);
+ zfs_rs_copy(rs, &rs_tmp, rt);
} else if (right_over) {
// This modifies the buffer already inside the range tree
- rs_set_start(rs, rt, end);
- rs_copy(rs, &rs_tmp, rt);
+ zfs_rs_set_start(rs, rt, end);
+ zfs_rs_copy(rs, &rs_tmp, rt);
} else {
zfs_btree_remove_idx(&rt->rt_root, &where);
rs = NULL;
}
if (rs != NULL) {
/*
* The fill of the leftover segment will always be equal to
* the size, since we do not support removing partial segments
* of range trees with gaps.
*/
- rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) -
- rs_get_start_raw(rs, rt));
- range_tree_stat_incr(rt, &rs_tmp);
+ zfs_zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
+ zfs_rs_get_start_raw(rs, rt));
+ zfs_range_tree_stat_incr(rt, &rs_tmp);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
}
rt->rt_space -= size;
}
void
-range_tree_remove(void *arg, uint64_t start, uint64_t size)
+zfs_range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
- range_tree_remove_impl(arg, start, size, B_FALSE);
+ zfs_range_tree_remove_impl(arg, start, size, B_FALSE);
}
void
-range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
+zfs_range_tree_remove_fill(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
- range_tree_remove_impl(rt, start, size, B_TRUE);
+ zfs_range_tree_remove_impl(rt, start, size, B_TRUE);
}
void
-range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
+zfs_range_tree_resize_segment(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
uint64_t newstart, uint64_t newsize)
{
- int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt));
+ int64_t delta = newsize - (zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt));
- range_tree_stat_decr(rt, rs);
+ zfs_range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
- rs_set_start(rs, rt, newstart);
- rs_set_end(rs, rt, newstart + newsize);
+ zfs_rs_set_start(rs, rt, newstart);
+ zfs_rs_set_end(rs, rt, newstart + newsize);
- range_tree_stat_incr(rt, rs);
+ zfs_range_tree_stat_incr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
rt->rt_space += delta;
}
-static range_seg_t *
-range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
+static zfs_range_seg_t *
+zfs_range_tree_find_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
- range_seg_max_t rsearch;
+ zfs_range_seg_max_t rsearch;
uint64_t end = start + size;
VERIFY(size != 0);
- rs_set_start(&rsearch, rt, start);
- rs_set_end(&rsearch, rt, end);
+ zfs_rs_set_start(&rsearch, rt, start);
+ zfs_rs_set_end(&rsearch, rt, end);
return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}
-range_seg_t *
-range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
+zfs_range_seg_t *
+zfs_range_tree_find(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
- if (rt->rt_type == RANGE_SEG64)
+ if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start);
- range_seg_t *rs = range_tree_find_impl(rt, start, size);
- if (rs != NULL && rs_get_start(rs, rt) <= start &&
- rs_get_end(rs, rt) >= start + size) {
+ zfs_range_seg_t *rs = zfs_range_tree_find_impl(rt, start, size);
+ if (rs != NULL && zfs_rs_get_start(rs, rt) <= start &&
+ zfs_rs_get_end(rs, rt) >= start + size) {
return (rs);
}
return (NULL);
}
void
-range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
+zfs_range_tree_verify_not_present(zfs_range_tree_t *rt, uint64_t off,
+ uint64_t size)
{
- range_seg_t *rs = range_tree_find(rt, off, size);
+ zfs_range_seg_t *rs = zfs_range_tree_find(rt, off, size);
if (rs != NULL)
panic("segment already in tree; rs=%p", (void *)rs);
}
boolean_t
-range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
+zfs_range_tree_contains(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
- return (range_tree_find(rt, start, size) != NULL);
+ return (zfs_range_tree_find(rt, start, size) != NULL);
}
/*
* Returns the first subset of the given range which overlaps with the range
* tree. Returns true if there is a segment in the range, and false if there
* isn't.
*/
boolean_t
-range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
+zfs_range_tree_find_in(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
uint64_t *ostart, uint64_t *osize)
{
- if (rt->rt_type == RANGE_SEG64)
+ if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start);
- range_seg_max_t rsearch;
- rs_set_start(&rsearch, rt, start);
- rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1);
+ zfs_range_seg_max_t rsearch;
+ zfs_rs_set_start(&rsearch, rt, start);
+ zfs_rs_set_end_raw(&rsearch, rt, zfs_rs_get_start_raw(&rsearch, rt) +
+ 1);
zfs_btree_index_t where;
- range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
+ zfs_range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
if (rs != NULL) {
*ostart = start;
- *osize = MIN(size, rs_get_end(rs, rt) - start);
+ *osize = MIN(size, zfs_rs_get_end(rs, rt) - start);
return (B_TRUE);
}
rs = zfs_btree_next(&rt->rt_root, &where, &where);
- if (rs == NULL || rs_get_start(rs, rt) > start + size)
+ if (rs == NULL || zfs_rs_get_start(rs, rt) > start + size)
return (B_FALSE);
- *ostart = rs_get_start(rs, rt);
- *osize = MIN(start + size, rs_get_end(rs, rt)) -
- rs_get_start(rs, rt);
+ *ostart = zfs_rs_get_start(rs, rt);
+ *osize = MIN(start + size, zfs_rs_get_end(rs, rt)) -
+ zfs_rs_get_start(rs, rt);
return (B_TRUE);
}
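/*
 * Illustrative sketch (not part of this change; the _sketch helper below is
 * hypothetical): because the function above only returns the first
 * overlapping subset, callers that want every piece of [start, start + size)
 * present in the tree advance past each result and call it again.
 */
static uint64_t __maybe_unused
zfs_range_tree_overlap_bytes_sketch(zfs_range_tree_t *rt, uint64_t start,
    uint64_t size)
{
	uint64_t ostart, osize, found = 0;

	while (size > 0 &&
	    zfs_range_tree_find_in(rt, start, size, &ostart, &osize)) {
		found += osize;
		size -= (ostart + osize) - start;
		start = ostart + osize;
	}
	return (found);
}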
/*
* Ensure that this range is not in the tree, regardless of whether
* it is currently in the tree.
*/
void
-range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
+zfs_range_tree_clear(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
if (size == 0)
return;
- if (rt->rt_type == RANGE_SEG64)
+ if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start);
- while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
- uint64_t free_start = MAX(rs_get_start(rs, rt), start);
- uint64_t free_end = MIN(rs_get_end(rs, rt), start + size);
- range_tree_remove(rt, free_start, free_end - free_start);
+ while ((rs = zfs_range_tree_find_impl(rt, start, size)) != NULL) {
+ uint64_t free_start = MAX(zfs_rs_get_start(rs, rt), start);
+ uint64_t free_end = MIN(zfs_rs_get_end(rs, rt), start + size);
+ zfs_range_tree_remove(rt, free_start, free_end - free_start);
}
}
void
-range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
+zfs_range_tree_swap(zfs_range_tree_t **rtsrc, zfs_range_tree_t **rtdst)
{
- range_tree_t *rt;
+ zfs_range_tree_t *rt;
- ASSERT0(range_tree_space(*rtdst));
+ ASSERT0(zfs_range_tree_space(*rtdst));
ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));
rt = *rtsrc;
*rtsrc = *rtdst;
*rtdst = rt;
}
void
-range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
+zfs_range_tree_vacate(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
+ void *arg)
{
if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
rt->rt_ops->rtop_vacate(rt, rt->rt_arg);
if (func != NULL) {
- range_seg_t *rs;
+ zfs_range_seg_t *rs;
zfs_btree_index_t *cookie = NULL;
while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
NULL) {
- func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
- rs_get_start(rs, rt));
+ func(arg, zfs_rs_get_start(rs, rt),
+ zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt));
}
} else {
zfs_btree_clear(&rt->rt_root);
}
memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
rt->rt_space = 0;
}
void
-range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
+zfs_range_tree_walk(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
+ void *arg)
{
zfs_btree_index_t where;
- for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
+ for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
- func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
- rs_get_start(rs, rt));
+ func(arg, zfs_rs_get_start(rs, rt), zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt));
}
}
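/*
 * Illustrative sketch (not part of this change; the _sketch callback below is
 * hypothetical): a walk callback receives (arg, start, length) for each
 * segment in ascending start order, e.g. to total the tracked bytes:
 *
 *	uint64_t total = 0;
 *	zfs_range_tree_walk(rt, zfs_range_tree_sum_cb_sketch, &total);
 */
static void __maybe_unused
zfs_range_tree_sum_cb_sketch(void *arg, uint64_t start, uint64_t size)
{
	(void) start;
	*(uint64_t *)arg += size;
}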
-range_seg_t *
-range_tree_first(range_tree_t *rt)
+zfs_range_seg_t *
+zfs_range_tree_first(zfs_range_tree_t *rt)
{
return (zfs_btree_first(&rt->rt_root, NULL));
}
uint64_t
-range_tree_space(range_tree_t *rt)
+zfs_range_tree_space(zfs_range_tree_t *rt)
{
return (rt->rt_space);
}
uint64_t
-range_tree_numsegs(range_tree_t *rt)
+zfs_range_tree_numsegs(zfs_range_tree_t *rt)
{
return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}
boolean_t
-range_tree_is_empty(range_tree_t *rt)
+zfs_range_tree_is_empty(zfs_range_tree_t *rt)
{
ASSERT(rt != NULL);
- return (range_tree_space(rt) == 0);
+ return (zfs_range_tree_space(rt) == 0);
}
/*
* Remove any overlapping ranges between the given segment [start, end)
* from removefrom. Add non-overlapping leftovers to addto.
*/
void
-range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
- range_tree_t *removefrom, range_tree_t *addto)
+zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
+ zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{
zfs_btree_index_t where;
- range_seg_max_t starting_rs;
- rs_set_start(&starting_rs, removefrom, start);
- rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs,
- removefrom) + 1);
+ zfs_range_seg_max_t starting_rs;
+ zfs_rs_set_start(&starting_rs, removefrom, start);
+ zfs_rs_set_end_raw(&starting_rs, removefrom,
+ zfs_rs_get_start_raw(&starting_rs, removefrom) + 1);
- range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
+ zfs_range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
&starting_rs, &where);
if (curr == NULL)
curr = zfs_btree_next(&removefrom->rt_root, &where, &where);
- range_seg_t *next;
+ zfs_range_seg_t *next;
for (; curr != NULL; curr = next) {
if (start == end)
return;
VERIFY3U(start, <, end);
/* there is no overlap */
- if (end <= rs_get_start(curr, removefrom)) {
- range_tree_add(addto, start, end - start);
+ if (end <= zfs_rs_get_start(curr, removefrom)) {
+ zfs_range_tree_add(addto, start, end - start);
return;
}
- uint64_t overlap_start = MAX(rs_get_start(curr, removefrom),
+ uint64_t overlap_start = MAX(zfs_rs_get_start(curr, removefrom),
start);
- uint64_t overlap_end = MIN(rs_get_end(curr, removefrom),
+ uint64_t overlap_end = MIN(zfs_rs_get_end(curr, removefrom),
end);
uint64_t overlap_size = overlap_end - overlap_start;
ASSERT3S(overlap_size, >, 0);
- range_seg_max_t rs;
- rs_copy(curr, &rs, removefrom);
+ zfs_range_seg_max_t rs;
+ zfs_rs_copy(curr, &rs, removefrom);
- range_tree_remove(removefrom, overlap_start, overlap_size);
+ zfs_range_tree_remove(removefrom, overlap_start, overlap_size);
if (start < overlap_start)
- range_tree_add(addto, start, overlap_start - start);
+ zfs_range_tree_add(addto, start, overlap_start - start);
start = overlap_end;
next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
/*
* If we find something here, we only removed part of the
* curr segment. Either there's some left at the end
* because we've reached the end of the range we're removing,
* or there's some left at the start because we started
* partway through the range. Either way, we continue with
* the loop. If it's the former, we'll return at the start of
* the loop, and if it's the latter we'll see if there is more
* area to process.
*/
if (next != NULL) {
- ASSERT(start == end || start == rs_get_end(&rs,
+ ASSERT(start == end || start == zfs_rs_get_end(&rs,
removefrom));
}
next = zfs_btree_next(&removefrom->rt_root, &where, &where);
}
VERIFY3P(curr, ==, NULL);
if (start != end) {
VERIFY3U(start, <, end);
- range_tree_add(addto, start, end - start);
+ zfs_range_tree_add(addto, start, end - start);
} else {
VERIFY3U(start, ==, end);
}
}
/*
* For each entry in rt, if it exists in removefrom, remove it
* from removefrom. Otherwise, add it to addto.
*/
void
-range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
- range_tree_t *addto)
+zfs_range_tree_remove_xor_add(zfs_range_tree_t *rt,
+ zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{
zfs_btree_index_t where;
- for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
+ for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
- range_tree_remove_xor_add_segment(rs_get_start(rs, rt),
- rs_get_end(rs, rt), removefrom, addto);
+ zfs_range_tree_remove_xor_add_segment(zfs_rs_get_start(rs, rt),
+ zfs_rs_get_end(rs, rt), removefrom, addto);
}
}
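/*
 * Illustrative worked example (not part of this change): with
 * removefrom = {[0, 100)} and rt = {[50, 150)}, the routine above removes the
 * overlap [50, 100) from removefrom, leaving {[0, 50)}, and adds the
 * non-overlapping remainder [100, 150) to addto.
 */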
uint64_t
-range_tree_min(range_tree_t *rt)
+zfs_range_tree_min(zfs_range_tree_t *rt)
{
- range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
- return (rs != NULL ? rs_get_start(rs, rt) : 0);
+ zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
+ return (rs != NULL ? zfs_rs_get_start(rs, rt) : 0);
}
uint64_t
-range_tree_max(range_tree_t *rt)
+zfs_range_tree_max(zfs_range_tree_t *rt)
{
- range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
- return (rs != NULL ? rs_get_end(rs, rt) : 0);
+ zfs_range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
+ return (rs != NULL ? zfs_rs_get_end(rs, rt) : 0);
}
uint64_t
-range_tree_span(range_tree_t *rt)
+zfs_range_tree_span(zfs_range_tree_t *rt)
{
- return (range_tree_max(rt) - range_tree_min(rt));
+ return (zfs_range_tree_max(rt) - zfs_range_tree_min(rt));
}
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 956bae46ef1b..bdeef0959da7 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,11087 +1,11087 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
* Copyright (c) 2023, 2024, Klara Inc.
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
#include <cityhash.h>
/*
* spa_thread() existed on Illumos as a parent thread for the various worker
* threads that actually run the pool, as a way to both reference the entire
* pool work as a single object, and to share properties like scheduling
* options. It has not yet been adapted to Linux or FreeBSD. This define is
* used to mark related parts of the code to make things easier for the reader,
* and to compile this code out. It can be removed when someone implements it,
* moves it to some Illumos-specific place, or removes it entirely.
*/
#undef HAVE_SPA_THREAD
/*
* The "System Duty Cycle" scheduling class is an Illumos feature to help
* prevent CPU-intensive kernel threads from affecting latency on interactive
* threads. It doesn't exist on Linux or FreeBSD, so the supporting code is
* gated behind a define. On Illumos SDC depends on spa_thread(), but
* spa_thread() also has other uses, so this is a separate define.
*/
#undef HAVE_SYSDC
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_SYNC, /* sync thread assigned */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_SYNC { ZTI_MODE_SYNC, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_SCALE
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE uses a number of taskqs
* that scales with the number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for high priority I/Os that
* need to be handled with minimum delay. The Illumos taskq has an unfair
* TQ_FRONT implementation, so separate high priority threads are used there.
*/
static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
#ifdef illumos
{ ZTI_SYNC, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
#else
{ ZTI_SYNC, ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* WRITE */
#endif
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FLUSH */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
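/*
 * Illustrative sketch (not part of this change): each ZTI_* entry above
 * expands to a zio_taskq_info_t initializer.  The READ/ISSUE entry ZTI_N(8),
 * for example, is ZTI_P(8, 1), i.e. { ZTI_MODE_FIXED, 8, 1 }: one taskq with
 * a fixed eight threads.  ZTI_SCALE is { ZTI_MODE_SCALE, 0, 1 }, which
 * spa_taskqs_init() later turns into a CPU-dependent taskq and thread count.
 */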
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type,
const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
/*
* Percentage of all CPUs that can be used by the metaslab preload taskq.
*/
static uint_t metaslab_preload_pct = 50;
static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
static uint_t zio_taskq_batch_tpq; /* threads per taskq */
#ifdef HAVE_SYSDC
static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
static const uint_t zio_taskq_basedc = 80; /* base duty cycle */
#endif
#ifdef HAVE_SPA_THREAD
static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
#endif
static uint_t zio_taskq_write_tpq = 16;
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
* This is used by zdb for spacemaps verification.
*/
boolean_t spa_mode_readable_spacemaps = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
static int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
uint64_t zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
* might be incomplete or out-dated.
*
* We are more tolerant for pools opened from a cachefile since we could have
* an out-dated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
static const boolean_t zfs_pause_spa_sync = B_FALSE;
/*
* Variables to indicate the livelist condense zthr func should wait at certain
* points for the livelist to be removed - used to test condense/destroy races
*/
static int zfs_livelist_condense_zthr_pause = 0;
static int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
static int zfs_livelist_condense_sync_cancel = 0;
static int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
static int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
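/*
 * Illustrative sketch (not part of this change): each call above adds one
 * nested nvlist keyed by the property name, roughly
 *	"size" -> { ZPROP_SOURCE = src, ZPROP_VALUE = <value> }
 * where the value is a string when strval is non-NULL and intval otherwise.
 */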
static int
spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl)
{
zpool_prop_t prop = zpool_name_to_prop(propname);
zprop_source_t src = ZPROP_SRC_NONE;
uint64_t intval;
int err;
/*
* NB: Not all properties lookups via this API require
* the spa props lock, so they must explicitly grab it here.
*/
switch (prop) {
case ZPOOL_PROP_DEDUPCACHED:
err = ddt_get_pool_dedup_cached(spa, &intval);
if (err != 0)
return (SET_ERROR(err));
break;
default:
return (SET_ERROR(EINVAL));
}
spa_prop_add_list(outnvl, prop, NULL, intval, src);
return (0);
}
int
spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props,
nvlist_t *outnvl)
{
int err = 0;
if (props == NULL)
return (0);
for (unsigned int i = 0; i < n_props && err == 0; i++) {
err = spa_prop_add(spa, props[i], outnvl);
}
return (err);
}
/*
* Add a user property (source=src, propname=propval) to an nvlist.
*/
static void
spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
zprop_source_t src)
{
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t *nv)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL,
brt_get_used(spa), src);
spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL,
brt_get_saved(spa), src);
spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL,
brt_get_ratio(spa), src);
spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
ddt_get_ddt_dsize(spa), src);
spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
spa_prop_add_list(nv, ZPOOL_PROP_LAST_SCRUBBED_TXG, NULL,
spa_get_last_scrubbed_txg(spa), src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS; when
* opening pools created before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(nv, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(nv, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t *nv)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t *za;
dsl_pool_t *dp;
int err = 0;
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
za = zap_attribute_alloc();
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nv);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za->za_name)) ==
ZPOOL_PROP_INVAL && !zfs_prop_user(za->za_name))
continue;
switch (za->za_integer_length) {
case 8:
/* integer property */
if (za->za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za->za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za->za_first_integer;
}
spa_prop_add_list(nv, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za->za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za->za_name, 1, za->za_num_integers, strval);
if (err) {
kmem_free(strval, za->za_num_integers);
break;
}
if (prop != ZPOOL_PROP_INVAL) {
spa_prop_add_list(nv, prop, strval, 0, src);
} else {
src = ZPROP_SRC_LOCAL;
spa_prop_add_user(nv, za->za_name, strval,
src);
}
kmem_free(strval, za->za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
zap_attribute_free(za);
if (err && err != ENOENT)
return (err);
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
const char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
/*
* Sanitize the input.
*/
if (zfs_prop_user(propname)) {
if (strlen(propname) >= ZAP_MAXNAMELEN) {
error = SET_ERROR(ENAMETOOLONG);
break;
}
if (strlen(fnvpair_value_string(elem)) >=
ZAP_MAXVALUELEN) {
error = SET_ERROR(E2BIG);
break;
}
} else if (zpool_prop_feature(propname)) {
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
} else {
error = SET_ERROR(EINVAL);
break;
}
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
error = nvpair_value_uint64(elem, &intval);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
const char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_INVAL &&
zfs_prop_user(nvpair_name(elem))) {
need_sync = B_TRUE;
break;
}
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*
* The GUID of the pool will be changed to the value pointed to by guidp.
* The GUID may not be set to the reserved value of 0.
* The new GUID will be generated if guidp is NULL.
*/
int
spa_change_guid(spa_t *spa, const uint64_t *guidp)
{
uint64_t guid;
int error;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
if (guidp != NULL) {
guid = *guidp;
if (guid == 0) {
error = SET_ERROR(EINVAL);
goto out;
}
if (spa_guid_exists(guid, 0)) {
error = SET_ERROR(EEXIST);
goto out;
}
} else {
guid = spa_generate_guid(NULL);
}
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
/*
* Clear the kobj flag from all the vdevs to allow
* vdev_cache_process_kobj_evt() to post events to all the
* vdevs since GUID is updated.
*/
vdev_clear_kobj_evt(spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
out:
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
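/*
 * Illustrative sketch (not part of this change; 'new_guid' is a hypothetical
 * caller variable): callers either supply the desired GUID or pass NULL to
 * have one generated, e.g.
 *
 *	error = spa_change_guid(spa, NULL);		(generate a new GUID)
 *	error = spa_change_guid(spa, &new_guid);	(new_guid must be nonzero)
 */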
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_SYNC:
/*
* Create one wr_iss taskq for every 'zio_taskq_write_tpq' CPUs,
* not to exceed the number of spa allocators, and align to it.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
count = MAX(1, cpus / MAX(1, zio_taskq_write_tpq));
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
count = MIN(count, spa->spa_alloc_count);
while (spa->spa_alloc_count % count != 0 &&
spa->spa_alloc_count < count * 2)
count--;
/*
* zio_taskq_batch_pct is unbounded and may exceed 100%, but no
* single taskq may have more threads than 100% of online cpus.
*/
value = (zio_taskq_batch_pct + count / 2) / count;
value = MIN(value, 100);
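/*
 * Illustrative example (parameter values chosen for the example, not
 * necessarily the defaults): with 32 online CPUs, zio_taskq_batch_pct
 * of 80, zio_taskq_write_tpq of 16 and 4 allocators, cpus = 25 and
 * count = MAX(1, 25 / 16) = 1, which already divides the allocator
 * count evenly, so a single write-issue taskq is created with a
 * thread percentage (value) of 80.
 */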
flags |= TASKQ_THREADS_CPU_PCT;
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but we want
* fewer for better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Limit each taskq to at most 100% of CPUs so we don't trigger an assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
value = (zio_taskq_batch_pct + count / 2) / count;
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_taskqs_init()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
#ifdef HAVE_SYSDC
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
(void) zio_taskq_basedc;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
#endif
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
#ifdef HAVE_SYSDC
}
#endif
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
#ifdef _KERNEL
/*
* The READ and WRITE rows of zio_taskqs are configurable at module load time
* by setting zio_taskq_read or zio_taskq_write.
*
* Example (the defaults for READ and WRITE)
* zio_taskq_read='fixed,1,8 null scale null'
* zio_taskq_write='sync null scale null'
*
* Each sets the entire row at a time.
*
* 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number
* of threads per taskq.
*
* 'null' can only be set on the high-priority queues (queue selection for
* high-priority queues will fall back to the regular queue if the high-pri
* is NULL).
*/
static const char *const modes[ZTI_NMODES] = {
"fixed", "scale", "sync", "null"
};
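/*
 * Illustrative usage (the exact tunable names/paths depend on how each
 * platform registers these parameters):
 *
 *   Linux:   modprobe zfs zio_taskq_write='sync null scale null'
 *   FreeBSD: sysctl vfs.zfs.zio_taskq_write='sync null scale null'
 *
 * Either form replaces the entire WRITE row of zio_taskqs at once.
 */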
/* Parse the incoming config string. Modifies cfg */
static int
spa_taskq_param_set(zio_type_t t, char *cfg)
{
int err = 0;
zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}};
char *next = cfg, *tok, *c;
/*
* Parse out each element from the string and fill `row`. The entire
* row has to be set at once, so any errors are flagged by just
* breaking out of this loop early.
*/
uint_t q;
for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
/* `next` is the start of the config */
if (next == NULL)
break;
/* Eat up leading space */
while (isspace(*next))
next++;
if (*next == '\0')
break;
/* Mode ends at space or end of string */
tok = next;
next = strchr(tok, ' ');
if (next != NULL) *next++ = '\0';
/* Parameters start after a comma */
c = strchr(tok, ',');
if (c != NULL) *c++ = '\0';
/* Match mode string */
uint_t mode;
for (mode = 0; mode < ZTI_NMODES; mode++)
if (strcmp(tok, modes[mode]) == 0)
break;
if (mode == ZTI_NMODES)
break;
/* Canary: invalid mode, must be overwritten by the per-mode setup below */
row[q].zti_mode = ZTI_NMODES;
/* Per-mode setup */
switch (mode) {
/*
* FIXED is parameterised: number of queues, and number of
* threads per queue.
*/
case ZTI_MODE_FIXED: {
/* No parameters? */
if (c == NULL || *c == '\0')
break;
/* Find next parameter */
tok = c;
c = strchr(tok, ',');
if (c == NULL)
break;
/* Take digits and convert */
unsigned long long nq;
if (!(isdigit(*tok)))
break;
err = ddi_strtoull(tok, &tok, 10, &nq);
/* Must succeed and also end at the next param sep */
if (err != 0 || tok != c)
break;
/* Move past the comma */
tok++;
/* Need another number */
if (!(isdigit(*tok)))
break;
/* Remember start to make sure we moved */
c = tok;
/* Take digits */
unsigned long long ntpq;
err = ddi_strtoull(tok, &tok, 10, &ntpq);
/* Must succeed, and moved forward */
if (err != 0 || tok == c || *tok != '\0')
break;
/*
* sanity; zero queues/threads make no sense, and
* 16K is almost certainly more than anyone will ever
* need and avoids silly numbers like UINT32_MAX
*/
if (nq == 0 || nq >= 16384 ||
ntpq == 0 || ntpq >= 16384)
break;
const zio_taskq_info_t zti = ZTI_P(ntpq, nq);
row[q] = zti;
break;
}
case ZTI_MODE_SCALE: {
const zio_taskq_info_t zti = ZTI_SCALE;
row[q] = zti;
break;
}
case ZTI_MODE_SYNC: {
const zio_taskq_info_t zti = ZTI_SYNC;
row[q] = zti;
break;
}
case ZTI_MODE_NULL: {
/*
* Can only null the high-priority queues; the general-
* purpose ones have to exist.
*/
if (q != ZIO_TASKQ_ISSUE_HIGH &&
q != ZIO_TASKQ_INTERRUPT_HIGH)
break;
const zio_taskq_info_t zti = ZTI_NULL;
row[q] = zti;
break;
}
default:
break;
}
/* Ensure we set a mode */
if (row[q].zti_mode == ZTI_NMODES)
break;
}
/* Didn't get a full row, fail */
if (q < ZIO_TASKQ_TYPES)
return (SET_ERROR(EINVAL));
/* Eat trailing space */
if (next != NULL)
while (isspace(*next))
next++;
/* If there's anything left over then fail */
if (next != NULL && *next != '\0')
return (SET_ERROR(EINVAL));
/* Success! Copy it into the real config */
for (q = 0; q < ZIO_TASKQ_TYPES; q++)
zio_taskqs[t][q] = row[q];
return (0);
}
static int
spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
{
int pos = 0;
/* Build parameter string from live config */
const char *sep = "";
for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
const zio_taskq_info_t *zti = &zio_taskqs[t][q];
if (zti->zti_mode == ZTI_MODE_FIXED)
pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
modes[zti->zti_mode], zti->zti_count,
zti->zti_value);
else
pos += sprintf(&buf[pos], "%s%s", sep,
modes[zti->zti_mode]);
sep = " ";
}
if (add_newline)
buf[pos++] = '\n';
buf[pos] = '\0';
return (pos);
}
#ifdef __linux__
static int
spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
{
char *cfg = kmem_strdup(val);
int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
kmem_free(cfg, strlen(val)+1);
return (-err);
}
static int
spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
{
return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
}
static int
spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
{
char *cfg = kmem_strdup(val);
int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
kmem_free(cfg, strlen(val)+1);
return (-err);
}
static int
spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
{
return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
}
#else
/*
* On FreeBSD load-time parameters can be set up before malloc() is available,
* so we have to do all the parsing work on the stack.
*/
#define SPA_TASKQ_PARAM_MAX (128)
static int
spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
{
char buf[SPA_TASKQ_PARAM_MAX];
int err;
(void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
if (err || req->newptr == NULL)
return (err);
return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
}
static int
spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
{
char buf[SPA_TASKQ_PARAM_MAX];
int err;
(void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
if (err || req->newptr == NULL)
return (err);
return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
}
#endif
#endif /* _KERNEL */
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself.
*/
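/*
 * When more than one taskq exists for a type/priority pair, write-issue
 * zios are routed by their allocator so a given allocator's I/O stays on
 * the same taskq; all other zios are spread across the taskqs using the
 * current time as a cheap pseudo-random index.
 */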
void
spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, zio_t *zio, boolean_t cutinline)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(zio);
ASSERT(taskq_empty_ent(&zio->io_tqent));
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) &&
ZIO_HAS_ALLOCATOR(zio)) {
tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, zio, cutinline ? TQ_FRONT : 0,
&zio->io_tqent);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
#ifdef HAVE_SYSDC
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
#endif
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
extern metaslab_ops_t *metaslab_allocator(spa_t *spa);
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
metaslab_ops_t *msp = metaslab_allocator(spa);
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
spa->spa_normal_class = metaslab_class_create(spa, msp);
spa->spa_log_class = metaslab_class_create(spa, msp);
spa->spa_embedded_log_class = metaslab_class_create(spa, msp);
spa->spa_special_class = metaslab_class_create(spa, msp);
spa->spa_dedup_class = metaslab_class_create(spa, msp);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around for a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_healed,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_activate_os(spa);
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool keeps the pools independent, so that if one
* pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* The taskq to preload metaslabs.
*/
spa->spa_metaslab_taskq = taskq_create("z_metaslab",
metaslab_preload_pct, maxclsyspri, 1, INT_MAX,
TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_metaslab_taskq) {
taskq_destroy(spa->spa_metaslab_taskq);
spa->spa_metaslab_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
avl_destroy(&spa->spa_errlist_healed);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
spa_deactivate_os(spa);
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that will set the flag that will instruct
* spa_sync to attempt to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
log_summary_entry_t *e;
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) {
VERIFY0(e->lse_mscount);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
if (spa->spa_raidz_expand_zthr != NULL) {
zthr_destroy(spa->spa_raidz_expand_zthr);
spa->spa_raidz_expand_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If we have set the spa_final_txg, we have already performed the
* tasks below in spa_export_common(). We should not redo it here since
* we delay the final TXGs beyond what spa_final_txg is set at.
*/
if (spa->spa_final_txg == UINT64_MAX) {
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
vdev_t *root_vdev = spa->spa_root_vdev;
vdev_initialize_stop_all(root_vdev,
VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
l2arc_spa_rebuild_stop(spa);
}
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
taskq_wait(spa->spa_metaslab_taskq);
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
brt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
if (spa->spa_spares.sav_vdevs) {
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
if (spa->spa_l2cache.sav_vdevs) {
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa->spa_raidz_expand = NULL;
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
if (spa->spa_spares.sav_vdevs) {
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
}
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
spa->spa_spares.sav_count);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device, or if the header
* of the device is invalid, we issue an async
* TRIM command for the whole device, which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, sav->sav_count);
out:
/*
* Purge vdevs that were dropped
*/
if (oldvdevs) {
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
}
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
nv = fnvlist_alloc();
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device missing only if it failed
* to open (i.e. offline or faulted devices are not
* considered missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp))
spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp);
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
boolean_t sle_verify_data;
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* Maximum number of inflight bytes is a power-of-two fraction of the ARC
* size (arc_target_bytes() >> spa_load_verify_shift). By default, we set
* it to 1/16th of the ARC.
*/
static uint_t spa_load_verify_shift = 4;
static int spa_load_verify_metadata = B_TRUE;
static int spa_load_verify_data = B_TRUE;
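/*
 * For example, with a 64 GiB ARC target and the default shift of 4,
 * at most 4 GiB of verification I/O may be in flight at once.
 */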
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zio_t *rio = arg;
spa_load_error_t *sle = rio->io_private;
(void) zilog, (void) dnp;
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
/*
* Sanity check the block pointer in order to detect obvious damage
* before using the contents in subsequent checks or in zio_read().
* When damaged consider it to be a metadata error since we cannot
* trust the BP_GET_TYPE and BP_GET_LEVEL values.
*/
if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
atomic_inc_64(&sle->sle_meta_count);
return (0);
}
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
if (!BP_IS_METADATA(bp) &&
(!spa_load_verify_data || !sle->sle_verify_data))
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) dp, (void) arg;
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
policy.zlp_maxmeta == UINT64_MAX)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
/*
* Verify data only if we are rewinding or error limit was set.
* Otherwise nothing except dbgmsg cares about it; don't waste the time.
*/
sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
(policy.zlp_maxdata < UINT64_MAX);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
spa->spa_load_txg_ts);
fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
loss);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za->za_first_integer;
zap_attribute_free(za);
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load in the value for the livelist to be removed and open it. Then,
* load its first sublist and determine which block pointers should actually
* be freed. Then, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
VERIFY0(dsl_deadlist_open(ll, mos, ll_obj));
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
minclsyspri);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both stores the number of
* blockpointers and iterates over them while the bpobj's lock is held,
* so the sizes returned to us are consistent with what was actually
* processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr from restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing can not continue: either it was externally stopped or
* we were unable to assign to a tx because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa, minclsyspri);
}
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
spa_start_raidz_expansion_thread(spa);
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa, minclsyspri);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
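/*
* Wrapper around spa_load_impl(): record the load state and timestamp,
* post an ereport if the load fails, and reset the load state when done.
*/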
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
const char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
spa_import_progress_set_notes(spa, "spa_load()");
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
vd->vdev_root_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_root_zap));
}
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#else
#define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check - This is used by zdb which
* is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host which last imported it. Since the hostid in the
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Nanoseconds the activity check must watch for changes on-disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Remote host activity check.
*
* error results:
* 0 - no activity detected
* EREMOTEIO - remote activity detected
* EINTR - user canceled the operation
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config,
boolean_t importing)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire, now;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_delay += import_delay * random_in_range(250) / 1000;
import_expire = gethrtime() + import_delay;
if (importing) {
spa_import_progress_set_notes(spa, "Checking MMP activity, "
"waiting %llu ms", (u_longlong_t)NSEC2MSEC(import_delay));
}
int iterations = 0;
while ((now = gethrtime()) < import_expire) {
if (importing && iterations++ % 30 == 0) {
spa_import_progress_set_notes(spa, "Checking MMP "
"activity, %llu ms remaining",
(u_longlong_t)NSEC2MSEC(import_expire - now));
}
if (importing) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
}
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active, store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from the configuration read from disk, store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
/*
* Called from zfs_ioc_clear for a pool that was suspended
* after failing mmp write checks.
*/
boolean_t
spa_mmp_remote_host_activity(spa_t *spa)
{
ASSERT(spa_multihost(spa) && spa_suspended(spa));
nvlist_t *best_label;
uberblock_t best_ub;
/*
* Locate the best uberblock on disk
*/
vdev_uberblock_load(spa->spa_root_vdev, &best_ub, &best_label);
if (best_label) {
/*
* confirm that the best hostid matches our hostid
*/
if (nvlist_exists(best_label, ZPOOL_CONFIG_HOSTID) &&
spa_get_hostid(spa) !=
fnvlist_lookup_uint64(best_label, ZPOOL_CONFIG_HOSTID)) {
nvlist_free(best_label);
return (B_TRUE);
}
nvlist_free(best_label);
} else {
return (B_TRUE);
}
if (!MMP_VALID(&best_ub) ||
!MMP_FAIL_INT_VALID(&best_ub) ||
MMP_FAIL_INT(&best_ub) == 0) {
return (B_TRUE);
}
if (best_ub.ub_txg != spa->spa_uberblock.ub_txg ||
best_ub.ub_timestamp != spa->spa_uberblock.ub_timestamp) {
zfs_dbgmsg("txg mismatch detected during pool clear "
"txg %llu ub_txg %llu timestamp %llu ub_timestamp %llu",
(u_longlong_t)spa->spa_uberblock.ub_txg,
(u_longlong_t)best_ub.ub_txg,
(u_longlong_t)spa->spa_uberblock.ub_timestamp,
(u_longlong_t)best_ub.ub_timestamp);
return (B_TRUE);
}
/*
* Perform an activity check looking for any remote writer
*/
return (spa_activity_check(spa, &spa->spa_uberblock, spa->spa_config,
B_FALSE) != 0);
}
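/*
* Verify that the pool was last accessed by this system by comparing the
* hostid stored in the MOS config with our own hostid.
*/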
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
const char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
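/*
* Parse the provided (still untrusted) config: extract the pool guid,
* comment, compatibility and split information, and build the initial
* in-core vdev tree from ZPOOL_CONFIG_VDEV_TREE.
*/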
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
const char *comment;
const char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case in which we allow an already imported pool to be
* imported again is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to still be considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
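/*
* Select the uberblock used for this load: find the best uberblock in the
* vdev labels, perform the MMP activity check if required, and verify that
* the pool version and the label's features_for_read are supported.
*/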
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
if (ub->ub_raidz_reflow_info != 0) {
spa_load_note(spa, "uberblock raidz_reflow_info: "
"state=%u offset=%llu",
(int)RRSS_GET_STATE(ub),
(u_longlong_t)RRSS_GET_OFFSET(ub));
}
/*
* For pools which have the multihost property on determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error =
spa_activity_check(spa, ub, spa->spa_config, B_TRUE);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
spa->spa_label_features = fnvlist_dup(features);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
unsup_feat = fnvlist_alloc();
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
fnvlist_add_string(unsup_feat,
nvpair_name(nvp), "");
}
}
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
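/*
* Open the MOS root block pointer by initializing the dsl_pool from the
* selected uberblock.
*/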
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
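/*
* Load the trusted config from the MOS, rebuild the vdev tree from it and
* reopen and revalidate the vdevs. Returns EAGAIN when the provided config
* differs too much from the MOS config and the load must be redone.
*/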
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, the pool owner hasn't been verified yet,
* so do the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible method that has a
* best effort policy.
*/
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If 'zpool import' used a cached config, then the on-disk hostid and
* hostname may be different to the cached config in ways that should
* prevent import. Userspace can't discover this without a scan, but
* we know, so we add these values to LOAD_INFO so the caller can know
* the difference.
*
* Note that we have to do this before the config is regenerated,
* because the new config will have the hostid and hostname for this
* host, in readiness for import.
*/
if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID))
fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID,
fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID));
if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME))
fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME,
fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME));
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we have the config from the MOS, we can be stricter when
* checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
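/*
* Load the device removal state and the indirect vdev condensing state
* from the MOS.
*/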
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concrete vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
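/*
* Retrieve the feature objects from the MOS, verify that all active
* features are supported for read (and write, if applicable), and cache
* the feature refcounts.
*/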
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
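/*
* Open the dsl_pool, which loads the special MOS directories needed by the
* dsl layer.
*/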
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
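/*
* Load pool-wide state from the MOS: checksum salt, deferred-frees bpobj,
* error logs, history object, per-vdev ZAP map and pool properties.
*/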
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/* Load the last scrubbed txg. */
error = spa_dir_prop(spa, DMU_POOL_LAST_SCRUBBED_TXG,
&spa->spa_scrubbed_last_txg, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early / out of sync context. See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
nvlist_free(mos_config);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_DEDUP_TABLE_QUOTA,
&spa->spa_dedup_table_quota);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
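/*
* Load and open the auxiliary vdevs: hot spares and level 2 ARC (cache)
* devices.
*/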
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
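/*
* Load per-vdev metadata (metaslabs, DTLs, space maps) and the log space
* maps, enforcing the multihost hostid requirement and handling
* autoreplace checks along the way.
*/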
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
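/*
* Load the dedup tables (DDTs) from the MOS.
*/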
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
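/*
* Load the block reference table (BRT) used by block cloning.
*/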
static int
spa_ld_load_brt(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = brt_load(spa);
if (error != 0) {
spa_load_failed(spa, "brt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
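/*
* Verify that the intent logs can be replayed. If logs are missing and
* top-level vdevs are also missing, the logs are dropped instead of
* failing the load.
*/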
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
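/*
* Verify pool data before we start pushing transactions (skipped for
* tryimport).
*/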
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
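/*
* Tear down and reactivate the spa so the load can be retried with a
* different config or uberblock.
*/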
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it to the original value before
* returning, as we might be calling spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
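/*
* If the pool has a checkpoint, read the checkpointed uberblock from the
* MOS and record its txg and timestamp in the spa.
*/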
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_load_thread == curthread);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
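/*
* First phase of the load: parse the provided config, open and validate
* the vdevs, select the best uberblock and use it to open the MOS root
* block pointer.
*/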
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
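/*
* Rewind the pool to its checkpoint: read the checkpointed uberblock from
* the MOS, make it the current uberblock and, for writeable imports, sync
* it to the vdev labels to make the rewind permanent.
*/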
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases when we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening a
* pool as read-only. Specifically, verifying the checkpointed
* state with zdb, and importing the checkpointed state to get
* a "preview" of its content.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
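/*
* Open the MOS using the provided config, then redo the load with the
* trusted config from the MOS if the two differ too much.
*/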
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
hrtime_t load_start = gethrtime();
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint update config cache
* anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Drop the namespace lock for the rest of the function.
*/
spa->spa_load_thread = curthread;
mutex_exit(&spa_namespace_lock);
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
spa_import_progress_set_notes(spa, "Loading checkpoint txg");
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
goto fail;
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
spa_import_progress_set_notes(spa, "Loading indirect vdev metadata");
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
goto fail;
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
spa_import_progress_set_notes(spa, "Checking feature flags");
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
goto fail;
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
spa_import_progress_set_notes(spa, "Loading special MOS directories");
error = spa_ld_load_special_directories(spa);
if (error != 0)
goto fail;
/*
* Retrieve pool properties from the MOS.
*/
spa_import_progress_set_notes(spa, "Loading properties");
error = spa_ld_get_props(spa);
if (error != 0)
goto fail;
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
spa_import_progress_set_notes(spa, "Loading AUX vdevs");
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
goto fail;
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
spa_import_progress_set_notes(spa, "Loading vdev metadata");
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
goto fail;
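/*
 * Load the deduplication tables (DDTs) from the MOS.
 */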
spa_import_progress_set_notes(spa, "Loading dedup tables");
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
goto fail;
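/*
 * Load the block reference table (BRT) used by block cloning.
 */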
spa_import_progress_set_notes(spa, "Loading BRT");
error = spa_ld_load_brt(spa);
if (error != 0)
goto fail;
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
spa_import_progress_set_notes(spa, "Verifying Log Devices");
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
goto fail;
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
error = spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP);
goto fail;
}
/*
* Traverse the last txgs to make sure the pool was left off in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
spa_import_progress_set_notes(spa, "Verifying pool data");
error = spa_ld_verify_pool_data(spa);
if (error != 0)
goto fail;
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_import_progress_set_notes(spa, "Calculating deflated space");
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
spa_import_progress_set_notes(spa, "Starting import");
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* Before we do any zio_write's, complete the raidz expansion
* scratch space copying, if necessary.
*/
if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID)
vdev_raidz_reflow_copy_scratch(spa);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
spa_import_progress_set_notes(spa, "Claiming ZIL blocks");
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
spa_import_progress_set_notes(spa, "Syncing ZIL claims");
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_import_progress_set_notes(spa, "Updating configs");
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
spa_import_progress_set_notes(spa, "Starting resilvers");
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_import_progress_set_notes(spa,
"Restarting device removals");
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
 * auxiliary threads above (of which the livelist
 * deletion zthr is one).
*/
spa_import_progress_set_notes(spa,
"Cleaning up inconsistent objsets");
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
spa_import_progress_set_notes(spa,
"Cleaning up temporary userrefs");
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_import_progress_set_notes(spa, "Restarting initialize");
vdev_initialize_restart(spa->spa_root_vdev);
spa_import_progress_set_notes(spa, "Restarting TRIM");
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_import_progress_set_notes(spa, "Finished importing");
}
zio_handle_import_delay(spa, gethrtime() - load_start);
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
fail:
mutex_enter(&spa_namespace_lock);
spa->spa_load_thread = NULL;
cv_broadcast(&spa_namespace_cv);
return (error);
}
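/*
 * Unload and reactivate the pool, cap spa_load_max_txg just below the
 * currently selected uberblock, and retry the load. Used by
 * spa_load_best() to walk back through earlier txgs.
 */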
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails this function will try loading prior txg's. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
 * the acceptable rewind range, and we're still finding uberblocks.
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep the spa_t around in
 * some ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, const void *tag,
nvlist_t *nvpolicy, nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
 * If vdev_validate() returns failure (indicated by
 * EBADF), one of the vdevs reports that the pool has
 * been exported or destroyed. If this is the case, the
 * config cache is out of sync and we should remove the
 * pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
*config = fnvlist_dup(spa->spa_config);
fnvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER && config != NULL) {
fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
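/*
 * spa_open_rewind() and spa_open() are thin wrappers around
 * spa_open_common(); the former also passes a rewind policy down to the
 * load code.
 */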
int
spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
nvlist_t *policy, nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, const void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
/*
 * Look up the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
 * Add spare device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
if (nspares != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t * const *)spares, nspares);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares));
/*
* Go through and find any spares which have since been
* repurposed as an active spare. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
guid = fnvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID);
VERIFY0(nvlist_lookup_uint64_array(spares[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
} else {
vs->vs_state =
spa->spa_spares.sav_vdevs[i]->vdev_state;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
if (nl2cache != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, nl2cache);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache));
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
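/*
 * Read feature reference counts from the MOS feature ZAP objects into
 * the given nvlist.
 */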
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za->za_integer_length == sizeof (uint64_t) &&
za->za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za->za_name,
za->za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za->za_integer_length == sizeof (uint64_t) &&
za->za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za->za_name,
za->za_first_integer));
}
zap_cursor_fini(&zc);
}
zap_attribute_free(za);
}
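/*
 * Refresh the feature nvlist from the in-core reference counts, avoiding
 * any on-disk I/O.
 */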
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
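/*
 * Return the current configuration (including spares, l2cache and feature
 * stats) for the named pool, along with its alternate root, if any.
 */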
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
fnvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_approx_errlog_size(spa));
if (spa_suspended(spa)) {
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
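/*
 * Validate both the spare and l2cache device arrays in the given vdev tree.
 */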
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
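/*
 * Replace the 'config' entry (spares or l2cache) in sav_config with the
 * concatenation of the existing device list and 'devs'.
 */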
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs));
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
newdevs[i] = fnvlist_dup(olddevs[i]);
for (i = 0; i < ndevs; i++)
newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
fnvlist_remove(sav->sav_config, config);
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)newdevs, ndevs + oldndevs);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
sav->sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)devs, ndevs);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
const char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
const char *feat_name;
const char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
 * Instantiate the metaslab groups (this will dirty the vdevs);
 * we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP));
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
/*
* Create BRT table and BRT table object.
*/
brt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
spa->spa_dedup_table_quota =
zpool_prop_default_numeric(ZPOOL_PROP_DEDUP_TABLE_QUOTA);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
spa_import_os(spa);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
const char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to caller (i.e. rewind info, missing devices, etc).
*/
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
fnvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES);
else
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
spa->spa_spares.sav_label_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
fnvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE);
else
spa->spa_l2cache.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
spa->spa_l2cache.sav_label_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
spa_import_os(spa);
return (0);
}
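/*
 * Probe a pool for import: load it read-only under a temporary TRYIMPORT
 * name, generate its configuration, then unload and remove the temporary
 * spa_t, returning only the config.
 */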
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
const char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
char *name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
(void) snprintf(name, MAXPATHLEN, "%s-%llx-%s",
TRYIMPORT_NAME, (u_longlong_t)(uintptr_t)curthread, poolname);
mutex_enter(&spa_namespace_lock);
spa = spa_add(name, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
kmem_free(name, MAXPATHLEN);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
/*
 * spa_import() relies on a pool config fetched by spa_tryimport()
 * for spare/cache devices. Import flags are not passed to
 * spa_tryimport(), which can make it return early on a missing log
 * device and never retrieve the cache and spare devices.
 * Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch
 * the correct configuration regardless of the missing log device.
*/
spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG;
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp);
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
dsname);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error = 0;
spa_t *spa;
hrtime_t export_start = gethrtime();
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks
* and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa->spa_export_thread = curthread;
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
mutex_exit(&spa_namespace_lock);
goto export_spa;
}
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
mutex_exit(&spa_namespace_lock);
/*
* At this point we no longer hold the spa_namespace_lock and
* there were no references on the spa. Future spa_lookups will
* notice the spa->spa_export_thread and wait until we signal
 * that we are finished.
*/
if (spa->spa_sync_on) {
vdev_t *rvd = spa->spa_root_vdev;
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools stealing the active spare
 * from an exported pool. At the user's own risk, such a pool
 * can still be forcibly exported.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
mutex_enter(&spa_namespace_lock);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
l2arc_spa_rebuild_stop(spa);
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time. This has to be
* done before we set the spa_final_txg, otherwise
* spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
* spa_should_flush_logs_on_unload() should be called after
* spa_state has been set to the new_state.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
spa_export_os(spa);
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
*oldconfig = fnvlist_dup(spa->spa_config);
if (new_state == POOL_STATE_EXPORTED)
zio_handle_export_delay(spa, gethrtime() - export_start);
/*
* Take the namespace lock for the actual spa_t removal
*/
mutex_enter(&spa_namespace_lock);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
spa->spa_export_thread = NULL;
}
/*
* Wake up any waiters in spa_lookup()
*/
cv_broadcast(&spa_namespace_cv);
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa->spa_export_thread = NULL;
spa_async_resume(spa);
/*
* Wake up any waiters in spa_lookup()
*/
cv_broadcast(&spa_namespace_cv);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the draid feature flag
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
 * The virtual dRAID spares must be added after the vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
 * vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
if (check_ashift && spa->spa_max_ashift == spa->spa_min_ashift) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg,
ZFS_ERR_ASHIFT_MISMATCH));
}
}
}
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Attach a device to a vdev specified by its guid. The vdev type can be
* a mirror, a raidz, or a leaf device that is also a top-level (e.g. a
* single device). When the vdev is a single device, a mirror vdev will be
* automatically inserted.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
 * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
 * should be performed instead of traditional healing reconstruction. From
 * an administrator's perspective these are both resilver operations.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare = B_FALSE;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
if (dsl_scan_resilvering(spa_get_dsl(spa)) ||
dsl_scan_resilver_scheduled(spa_get_dsl(spa))) {
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
}
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL) {
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_DEVRM_IN_PROGRESS));
}
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops;
if (raidz) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* Can't expand a raidz while prior expand is in progress.
*/
if (spa->spa_raidz_expand != NULL) {
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));
}
} else if (!oldvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
}
if (raidz)
pvd = oldvd;
else
pvd = oldvd->vdev_parent;
if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* log, dedup and special vdevs should not be replaced by spares.
*/
if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE ||
oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
 * vdev types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or
* the root vdev. A raidz vdev can be attached to, but
* you cannot attach to a raidz child.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops &&
!raidz)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd;
if (newvd->vdev_asize < vdev_get_min_asize(min_vdev))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) {
return (spa_vdev_exit(spa, newrootvd, txg,
ZFS_ERR_ASHIFT_MISMATCH));
}
/*
* RAIDZ-expansion-specific checks.
*/
if (raidz) {
if (vdev_raidz_attach_check(newvd) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* Fail early if a child is not healthy or is being replaced.
*/
for (int i = 0; i < oldvd->vdev_children; i++) {
if (vdev_is_dead(oldvd->vdev_child[i]) ||
!oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, newrootvd, txg,
ENXIO));
}
/* Also fail if reserved boot area is in-use */
if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i])
!= 0) {
return (spa_vdev_exit(spa, newrootvd, txg,
EADDRINUSE));
}
}
}
if (raidz) {
/*
* Note: oldvdpath is freed by spa_strfree(), but the string
* returned by kmem_asprintf() must be freed by kmem_strfree(),
* so we have to move it to a spa_strdup-ed string.
*/
char *tmp = kmem_asprintf("raidz%u-%u",
(uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id);
oldvdpath = spa_strdup(tmp);
kmem_strfree(tmp);
} else {
oldvdpath = spa_strdup(oldvd->vdev_path);
}
newvdpath = spa_strdup(newvd->vdev_path);
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvdpath, newvdpath) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5,
KM_SLEEP);
(void) sprintf(oldvd->vdev_path, "%s/old",
newvdpath);
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
spa_strfree(oldvdpath);
oldvdpath = spa_strdup(oldvd->vdev_path);
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (!raidz && pvd->vdev_ops != pvops) {
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
}
ASSERT(pvd->vdev_top->vdev_parent == rvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
if (raidz) {
/*
* Wait for the youngest allocations and frees to sync,
* and then wait for the deferral of those frees to finish.
*/
spa_vdev_config_exit(spa, NULL,
txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_wait(tvd);
dtl_max_txg = spa_vdev_config_enter(spa);
tvd->vdev_rz_expanding = B_TRUE;
vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg);
vdev_config_dirty(tvd);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
dtl_max_txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync,
newvd, tx);
dmu_tx_commit(tx);
} else {
vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future.
* We do this to ensure that dmu_sync-ed blocks have been
* stitched into the respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa,
SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing or a spare vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from userland through the
* ioctl interface, spa_vdev_detach() can potentially be called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen, as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even in the case where we checkpoint the pool exactly
* as spa_vdev_resilver_done() calls this function, everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
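/*
* Apply a single initialize command (start, cancel, suspend, or uninit) to
* the leaf vdev identified by 'guid'. Vdevs whose initialize thread is being
* stopped are appended to 'vd_list' so the caller can wait for them to exit.
*/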
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check to see
* if the vdev_initialize_thread is NULL. We do this instead
* of using the vdev_initialize_state since there might be
* a previous initialization process which has completed but
* whose thread has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_UNINIT &&
vd->vdev_initialize_thread != NULL) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
case POOL_INITIALIZE_UNINIT:
vdev_uninitialize(vd);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
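/*
* Start, cancel, suspend, or uninitialize manual initialization for each
* vdev GUID listed in 'nv'. Per-vdev failures are recorded in 'vdev_errlist'
* and the number of vdevs that failed is returned.
*/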
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
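/*
* Apply a single TRIM command (start, cancel, or suspend) to the leaf vdev
* identified by 'guid', after verifying the device supports the requested
* (secure) TRIM. Stopped vdevs are appended to 'vd_list' so the caller can
* wait for their threads to exit.
*/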
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check to see if the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but whose thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing ||
vd->vdev_top->vdev_rz_expanding)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
const char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
nvl = fnvlist_alloc();
fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL));
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
newspa->spa_config_splitting = fnvlist_alloc();
fnvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
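/*
* Walk the vdev tree and detach every device found by
* spa_vdev_resilver_done_hunt(), including the original hot spare when a
* spare replacement has completed.
*/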
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If a detach was not performed above, replace waiters will not have
* been notified, in which case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
return (spa_scan_range(spa, func, 0, 0));
}
int
spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
if (func == POOL_SCAN_ERRORSCRUB &&
!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
return (SET_ERROR(ENOTSUP));
return (dsl_scan(spa->spa_dsl_pool, func, txgstart, txgend));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
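/*
* Recursively mark vdevs with a pending remove request as REMOVED, clear
* their error counters, and notify userspace for each removed device.
*/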
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
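/*
* Recursively fault vdevs with a pending fault request. If a vdev holds the
* only valid copy of some data it is merely degraded, and a pool suspend is
* requested via 'suspend' instead.
*/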
static void
spa_async_fault_vdev(vdev_t *vd, boolean_t *suspend)
{
if (vd->vdev_fault_wanted) {
vdev_state_t newstate = VDEV_STATE_FAULTED;
vd->vdev_fault_wanted = B_FALSE;
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!vd->vdev_top->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd)) {
newstate = VDEV_STATE_DEGRADED;
/* A required disk is missing so suspend the pool */
*suspend = B_TRUE;
}
vdev_set_state(vd, B_TRUE, newstate, VDEV_AUX_ERR_EXCEEDED);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_fault_vdev(vd->vdev_child[c], suspend);
}
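/*
* Post an autoexpand event for every leaf vdev with a known physical path
* when the 'autoexpand' pool property is enabled.
*/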
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
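/*
* Worker thread that drains the pending spa_async_tasks bitmask: config
* updates, device removal and fault handling, autoexpand notifications, and
* restarting resilver, initialize, TRIM, and L2ARC activity.
*/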
static __attribute__((noreturn)) void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be marked faulted.
*/
if (tasks & SPA_ASYNC_FAULT_VDEV) {
spa_vdev_state_enter(spa, SCL_NONE);
boolean_t suspend = B_FALSE;
spa_async_fault_vdev(spa->spa_root_vdev, &suspend);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (suspend)
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE ||
tasks & SPA_ASYNC_DETACH_SPARE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
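/*
* Prevent new async tasks from running, wait for an in-flight async thread
* to finish, suspend device removal, and cancel the pool's background zthrs
* (condense, RAIDZ expansion, checkpoint discard, and livelist threads).
*/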
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
if (raidz_expand_thread != NULL)
zthr_cancel(raidz_expand_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
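/*
* Drop the async suspend count and resume device removal and the background
* zthrs cancelled by spa_async_suspend().
*/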
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
if (raidz_expand_thread != NULL)
zthr_resume(raidz_expand_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG and therefore running this function
* would be considered a no-op as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
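/*
* Pack 'nv' as XDR and write it to MOS object 'obj', storing the packed
* size in the object's bonus buffer.
*/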
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
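/*
* Sync an auxiliary vdev list (spares or L2ARC cache devices) to its packed
* nvlist object in the MOS, creating the object on first use.
*/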
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
nvroot = fnvlist_alloc();
if (sav->sav_count == 0) {
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)NULL, 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)list, sav->sav_count);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_root_zap != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_root_zap, tx));
}
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za->za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za->za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
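/*
* Sync task that records an upgraded SPA version in the uberblock and
* dirties the vdev configuration so the labels are rewritten.
*/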
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
const char *strval, *fname;
zpool_prop_t prop;
const char *propname;
const char *elemname = nvpair_name(elem);
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(elemname)) {
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", elemname, strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_INVAL:
if (zpool_prop_feature(elemname)) {
fname = strchr(elemname, '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", elemname);
break;
} else if (!zfs_prop_user(elemname)) {
ASSERT(zpool_prop_feature(elemname));
break;
}
zfs_fallthrough;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
if (prop == ZPOOL_PROP_INVAL) {
propname = elemname;
proptype = PROP_TYPE_STRING;
} else {
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
if (strlen(strval) == 0) {
/* remove the property if value == "" */
(void) zap_remove(mos,
spa->spa_pool_props_object,
propname, tx);
} else {
VERIFY0(zap_update(mos,
spa->spa_pool_props_object,
propname, 1, strlen(strval) + 1,
strval, tx));
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", elemname, strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", elemname,
(longlong_t)intval);
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
spa->spa_dedup_table_quota = intval;
break;
default:
break;
}
} else {
ASSERT(0); /* not allowed */
}
}
}
mutex_exit(&spa->spa_props_lock);
}
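/*
 * Illustrative sketch (not part of this change): how a property update
 * reaches spa_sync_props().  The real entry point is spa_prop_set(), which
 * validates the nvlist in open context before handing it to a sync task;
 * the property names and values below are only an example payload.
 */
static int
spa_example_set_props(const char *pool)
{
	nvlist_t *props = fnvlist_alloc();

	/* index/number properties are stored as uint64 values */
	fnvlist_add_uint64(props, zpool_prop_to_name(ZPOOL_PROP_AUTOTRIM),
	    SPA_AUTOTRIM_ON);
	/* string and user properties land in the pool props ZAP */
	fnvlist_add_string(props, "org.example:note", "hypothetical value");

	int error = dsl_sync_task(pool, NULL, spa_sync_props, props,
	    6, ZFS_SPACE_CHECK_RESERVED);

	fnvlist_free(props);
	return (error);
}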
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the ability to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
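/*
 * Illustrative sketch (not part of this change): the lz4 step above is an
 * instance of a more general "activate a feature that is enabled but not
 * yet active" upgrade, shown here as a hypothetical helper.
 */
static void
spa_example_activate_if_enabled(spa_t *spa, spa_feature_t feat, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, feat) &&
	    !spa_feature_is_active(spa, feat))
		spa_feature_incr(spa, feat, tx);
}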
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
- ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
+ ASSERT0(zfs_range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We can not defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
brt_sync(spa, txg);
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
dsl_errorscrub_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
if (pass == 1) {
/*
* dsl_pool_sync() -> dp_sync_tasks may have dirtied
* the config. If that happens, this txg should not
* be a no-op. So we must sync the config to the MOS
* before checking for no-op.
*
* Note that when the config is dirty, it will
* be written to the MOS (i.e. the MOS will be
* dirtied) every time we call spa_sync_config_object()
* in this txg. Therefore we can't call this after
* dsl_pool_sync() every pass, because it would
* prevent us from converging, since we'd dirty
* the MOS every pass.
*
* Sync tasks can only be processed in pass 1, so
* there's no need to do this in later passes.
*/
spa_sync_config_object(spa, tx);
}
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Now that there can be no more cloning in this transaction group,
* but we are still before issuing frees, we can process pending BRT
* updates.
*/
brt_pending_apply(spa, txg);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
/* Avoid holding the write lock unless actually necessary */
if (vd->vdev_aux == NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
continue;
}
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
/* spa_embedded_log_class has only one metaslab per vdev. */
metaslab_class_evict_old(spa->spa_special_class, txg);
metaslab_class_evict_old(spa->spa_dedup_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON)
vdev_autotrim_kick(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
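/*
 * Illustrative sketch (not part of this change): open-context code never
 * calls spa_sync() directly.  It dirties state under a transaction and then
 * waits for the sync thread, which runs spa_sync() for each txg; passing 0
 * waits for all currently outstanding txgs.
 */
static void
spa_example_wait_for_spa_sync(spa_t *spa, uint64_t txg)
{
	txg_wait_synced(spa_get_dsl(spa), txg);
}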
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
taskq_t *
spa_sync_tq_create(spa_t *spa, const char *name)
{
kthread_t **kthreads;
ASSERT(spa->spa_sync_tq == NULL);
ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
/*
* - do not allow more allocators than cpus.
* - there may be more cpus than allocators.
* - do not allow more sync taskq threads than allocators or cpus.
*/
int nthreads = spa->spa_alloc_count;
spa->spa_syncthreads = kmem_zalloc(sizeof (spa_syncthread_info_t) *
nthreads, KM_SLEEP);
spa->spa_sync_tq = taskq_create_synced(name, nthreads, minclsyspri,
nthreads, INT_MAX, TASKQ_PREPOPULATE, &kthreads);
VERIFY(spa->spa_sync_tq != NULL);
VERIFY(kthreads != NULL);
spa_syncthread_info_t *ti = spa->spa_syncthreads;
for (int i = 0; i < nthreads; i++, ti++) {
ti->sti_thread = kthreads[i];
ti->sti_allocator = i;
}
kmem_free(kthreads, sizeof (*kthreads) * nthreads);
return (spa->spa_sync_tq);
}
void
spa_sync_tq_destroy(spa_t *spa)
{
ASSERT(spa->spa_sync_tq != NULL);
taskq_wait(spa->spa_sync_tq);
taskq_destroy(spa->spa_sync_tq);
kmem_free(spa->spa_syncthreads,
sizeof (spa_syncthread_info_t) * spa->spa_alloc_count);
spa->spa_sync_tq = NULL;
}
uint_t
spa_acq_allocator(spa_t *spa)
{
int i;
if (spa->spa_alloc_count == 1)
return (0);
mutex_enter(&spa->spa_allocs_use->sau_lock);
uint_t r = spa->spa_allocs_use->sau_rotor;
do {
if (++r == spa->spa_alloc_count)
r = 0;
} while (spa->spa_allocs_use->sau_inuse[r]);
spa->spa_allocs_use->sau_inuse[r] = B_TRUE;
spa->spa_allocs_use->sau_rotor = r;
mutex_exit(&spa->spa_allocs_use->sau_lock);
spa_syncthread_info_t *ti = spa->spa_syncthreads;
for (i = 0; i < spa->spa_alloc_count; i++, ti++) {
if (ti->sti_thread == curthread) {
ti->sti_allocator = r;
break;
}
}
ASSERT3S(i, <, spa->spa_alloc_count);
return (r);
}
void
spa_rel_allocator(spa_t *spa, uint_t allocator)
{
if (spa->spa_alloc_count > 1)
spa->spa_allocs_use->sau_inuse[allocator] = B_FALSE;
}
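/*
 * Illustrative sketch (not part of this change): a sync thread brackets an
 * allocation burst with the rotor above so that no two threads share an
 * allocator at the same time.
 */
static void
spa_example_use_allocator(spa_t *spa)
{
	uint_t allocator = spa_acq_allocator(spa);

	/* ... issue allocations tagged with 'allocator' here ... */

	spa_rel_allocator(spa, allocator);
}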
void
spa_select_allocator(zio_t *zio)
{
zbookmark_phys_t *bm = &zio->io_bookmark;
spa_t *spa = zio->io_spa;
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
/*
* A gang block (for example) may have inherited its parent's
* allocator, in which case there is nothing further to do here.
*/
if (ZIO_HAS_ALLOCATOR(zio))
return;
ASSERT(spa != NULL);
ASSERT(bm != NULL);
/*
* First try to use an allocator assigned to the syncthread, and set
* the corresponding write issue taskq for the allocator.
* Note, we must have an open pool to do this.
*/
if (spa->spa_sync_tq != NULL) {
spa_syncthread_info_t *ti = spa->spa_syncthreads;
for (int i = 0; i < spa->spa_alloc_count; i++, ti++) {
if (ti->sti_thread == curthread) {
zio->io_allocator = ti->sti_allocator;
return;
}
}
}
/*
* We want to try to use as many allocators as possible to help improve
* performance, but we also want logically adjacent IOs to be physically
* adjacent to improve sequential read performance. We chunk each object
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level,
bm->zb_blkid >> 20);
zio->io_allocator = (uint_t)hv % spa->spa_alloc_count;
}
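/*
 * Illustrative sketch (not part of this change): outside of the sync taskq,
 * the allocator choice above reduces to hashing the bookmark together with
 * the block's 2^20-block region, so logically adjacent blocks map to the
 * same allocator while distinct objects spread across all of them.
 */
static uint_t
spa_example_allocator_for(spa_t *spa, const zbookmark_phys_t *bm)
{
	uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level,
	    bm->zb_blkid >> 20);

	return ((uint_t)hv % spa->spa_alloc_count);
}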
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
static boolean_t
spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
{
(void) spa;
int i;
uint64_t vdev_guid;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&vdev_guid) == 0 && vdev_guid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
boolean_t
spa_has_l2cache(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
}
/*
* Check if a pool has an active shared spare device.
* Note: the reference count of an active spare is 2: once as a spare and
* once as a replacement.
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being in-
* progress to not-in-progress so that the thread can wake up and determine
* whether it is finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
*in_progress = vdev_rebuild_active(spa->spa_root_vdev);
if (*in_progress)
break;
zfs_fallthrough;
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
case ZPOOL_WAIT_RAIDZ_EXPAND:
{
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
*in_progress = (vre != NULL && vre->vre_state == DSS_SCANNING);
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually, we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete.
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
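/*
 * Illustrative sketch (not part of this change): a kernel consumer waiting
 * for any in-flight scrub on a pool; 'waited' reports whether the caller
 * actually had to block.
 */
static int
spa_example_wait_for_scrub(const char *pool)
{
	boolean_t waited;
	int error = spa_wait(pool, ZPOOL_WAIT_SCRUB, &waited);

	if (error == 0 && waited)
		zfs_dbgmsg("scrub on %s finished while we waited", pool);
	return (error);
}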
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#else
(void) spa, (void) vd, (void) hist_nvl, (void) name;
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#else
(void) ev;
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
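/*
 * Illustrative sketch (not part of this change): spa_event_notify() above is
 * the usual one-shot wrapper for this create-then-post pair; splitting the
 * two calls lets a caller build the event before dropping its locks.
 */
static void
spa_example_post_config_sync_event(spa_t *spa)
{
	sysevent_t *ev = spa_event_create(spa, NULL, NULL,
	    ESC_ZFS_CONFIG_SYNC);

	spa_event_post(ev);
}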
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_range);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW,
"Percentage of CPUs to run a metaslab preload taskq");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW,
"Number of threads per IO worker taskqueue");
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
ZMOD_RW, "Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
ZMOD_RW, "Set the livelist condense synctask to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
#ifdef _KERNEL
ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read,
spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW,
"Configure IO queues for read IO");
ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write,
spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW,
"Configure IO queues for write IO");
#endif
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW,
"Number of CPUs per write issue taskq");
diff --git a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
index 4c3721c159be..5fbf474b0ece 100644
--- a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
+++ b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
@@ -1,638 +1,638 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017 by Delphix. All rights reserved.
*/
/*
* Storage Pool Checkpoint
*
* A storage pool checkpoint can be thought of as a pool-wide snapshot or
* a stable version of extreme rewind that guarantees no blocks from the
* checkpointed state will have been overwritten. It remembers the entire
* state of the storage pool (e.g. snapshots, dataset names, etc.) from the
* point that it was taken and the user can rewind back to that point even if
* they applied destructive operations on their datasets or even enabled new
* zpool on-disk features. If a pool has a checkpoint that is no longer
* needed, the user can discard it.
*
* == On disk data structures used ==
*
* - The pool has a new feature flag and a new entry in the MOS. The feature
* flag is set to active when we create the checkpoint and remains active
* until the checkpoint is fully discarded. The entry in the MOS config
* (DMU_POOL_ZPOOL_CHECKPOINT) is populated with the uberblock that
* references the state of the pool when we take the checkpoint. The entry
* remains populated until we start discarding the checkpoint or we rewind
* back to it.
*
* - Each vdev contains a vdev-wide space map while the pool has a checkpoint,
* which persists until the checkpoint is fully discarded. The space map
* contains entries that have been freed in the current state of the pool
* but we want to keep around in case we decide to rewind to the checkpoint.
* [see vdev_checkpoint_sm]
*
* - Each metaslab's ms_sm space map behaves the same as without the
* checkpoint, with the only exception being the scenario when we free
* blocks that belong to the checkpoint. In this case, these blocks remain
* ALLOCATED in the metaslab's space map and they are added as FREE in the
* vdev's checkpoint space map.
*
* - Each uberblock has a field (ub_checkpoint_txg) which holds the txg that
* the uberblock was checkpointed. For normal uberblocks this field is 0.
*
* == Overview of operations ==
*
* - To create a checkpoint, we first wait for the current TXG to be synced,
* so we can use the most recently synced uberblock (spa_ubsync) as the
* checkpointed uberblock. Then we use an early synctask to place that
* uberblock in MOS config, increment the feature flag for the checkpoint
* (marking it active), and setting spa_checkpoint_txg (see its use below)
* to the TXG of the checkpointed uberblock. We use an early synctask for
* the aforementioned operations to ensure that no blocks were dirtied
* between the current TXG and the TXG of the checkpointed uberblock
* (e.g. the previous txg).
*
* - When a checkpoint exists, we need to ensure that the blocks that
* belong to the checkpoint are freed but never reused. This means that
* these blocks should never end up in the ms_allocatable or the ms_freeing
* trees of a metaslab. Therefore, whenever there is a checkpoint the new
* ms_checkpointing tree is used in addition to the aforementioned ones.
*
* Whenever a block is freed and we find out that it is referenced by the
* checkpoint (we find out by comparing its birth to spa_checkpoint_txg),
* we place it in the ms_checkpointing tree instead of the ms_freeing tree.
* This way, we divide the blocks that are being freed into checkpointed
* and not-checkpointed blocks.
*
* In order to persist these frees, we write the extents from the
* ms_freeing tree to the ms_sm as usual, and the extents from the
* ms_checkpointing tree to the vdev_checkpoint_sm. This way, these
* checkpointed extents will remain allocated in the metaslab's ms_sm space
* map, and therefore won't be reused [see metaslab_sync()]. In addition,
* when we discard the checkpoint, we can find the entries that have
* actually been freed in vdev_checkpoint_sm.
* [see spa_checkpoint_discard_thread_sync()]
*
* - To discard the checkpoint we use an early synctask to delete the
* checkpointed uberblock from the MOS config, set spa_checkpoint_txg to 0,
* and wakeup the discarding zthr thread (an open-context async thread).
* We use an early synctask to ensure that the operation happens before any
* new data end up in the checkpoint's data structures.
*
* Once the synctask is done and the discarding zthr is awake, we discard
* the checkpointed data over multiple TXGs by having the zthr prefetching
* entries from vdev_checkpoint_sm and then starting a synctask that places
* them as free blocks into their respective ms_allocatable and ms_sm
* structures.
* [see spa_checkpoint_discard_thread()]
*
* When there are no entries left in the vdev_checkpoint_sm of all
* top-level vdevs, a final synctask runs that decrements the feature flag.
*
* - To rewind to the checkpoint, we first use the current uberblock and
* open the MOS so we can access the checkpointed uberblock from the MOS
* config. After we retrieve the checkpointed uberblock, we use it as the
* current uberblock for the pool by writing it to disk with an updated
* TXG, opening its version of the MOS, and moving on as usual from there.
* [see spa_ld_checkpoint_rewind()]
*
* An important note on rewinding to the checkpoint has to do with how we
* handle ZIL blocks. In the scenario of a rewind, we clear out any ZIL
* blocks that have not been claimed by the time we took the checkpoint
* as they should no longer be valid.
* [see comment in zil_claim()]
*
* == Miscellaneous information ==
*
* - In the hypothetical event that we take a checkpoint, remove a vdev,
* and attempt to rewind, the rewind would fail as the checkpointed
* uberblock would reference data in the removed device. For this reason
* and others of similar nature, we disallow the following operations that
* can change the config:
* vdev removal and attach/detach, mirror splitting, and pool reguid.
*
* - As most of the checkpoint logic is implemented in the SPA and doesn't
* distinguish datasets when it comes to space accounting, having a
* checkpoint can potentially break the boundaries set by dataset
* reservations.
*/
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_checkpoint.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
/*
* The following parameter limits the amount of memory to be used for the
* prefetching of the checkpoint space map done on each vdev while
* discarding the checkpoint.
*
* The reason it exists is because top-level vdevs with long checkpoint
* space maps can potentially take up a lot of memory depending on the
* amount of checkpointed data that has been freed within them while
* the pool had a checkpoint.
*/
static uint64_t zfs_spa_discard_memory_limit = 16 * 1024 * 1024;
int
spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
memset(pcs, 0, sizeof (pool_checkpoint_stat_t));
int error = zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT)
pcs->pcs_state = CS_CHECKPOINT_DISCARDING;
else
pcs->pcs_state = CS_CHECKPOINT_EXISTS;
pcs->pcs_space = spa->spa_checkpoint_info.sci_dspace;
pcs->pcs_start_time = spa->spa_checkpoint_info.sci_timestamp;
return (0);
}
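/*
 * Illustrative sketch (not part of this change): consuming the stats above;
 * the zfs_dbgmsg() output is only an example of what a caller might do.
 */
static void
spa_example_log_checkpoint_stats(spa_t *spa)
{
	pool_checkpoint_stat_t pcs;

	if (spa_checkpoint_get_stats(spa, &pcs) != 0)
		return;		/* no checkpoint on this pool */

	zfs_dbgmsg("checkpoint state %d, space %llu, created %llu",
	    (int)pcs.pcs_state, (u_longlong_t)pcs.pcs_space,
	    (u_longlong_t)pcs.pcs_start_time);
}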
static void
spa_checkpoint_discard_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = arg;
spa->spa_checkpoint_info.sci_timestamp = 0;
spa_feature_decr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_notify_waiters(spa);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"finished discarding checkpointed state from the pool");
}
typedef struct spa_checkpoint_discard_sync_callback_arg {
vdev_t *sdc_vd;
uint64_t sdc_txg;
uint64_t sdc_entry_limit;
} spa_checkpoint_discard_sync_callback_arg_t;
static int
spa_checkpoint_discard_sync_callback(space_map_entry_t *sme, void *arg)
{
spa_checkpoint_discard_sync_callback_arg_t *sdc = arg;
vdev_t *vd = sdc->sdc_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
if (sdc->sdc_entry_limit == 0)
return (SET_ERROR(EINTR));
/*
* Since the space map is not condensed, we know that
* none of its entries is crossing the boundaries of
* its respective metaslab.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_type, ==, SM_FREE);
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* At this point we should not be processing any
* other frees concurrently, so the lock is technically
* unnecessary. We use the lock anyway though to
* potentially save ourselves from future headaches.
*/
mutex_enter(&ms->ms_lock);
- if (range_tree_is_empty(ms->ms_freeing))
+ if (zfs_range_tree_is_empty(ms->ms_freeing))
vdev_dirty(vd, VDD_METASLAB, ms, sdc->sdc_txg);
- range_tree_add(ms->ms_freeing, sme->sme_offset, sme->sme_run);
+ zfs_range_tree_add(ms->ms_freeing, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
ASSERT3U(vd->vdev_spa->spa_checkpoint_info.sci_dspace, >=,
sme->sme_run);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, >=, sme->sme_run);
vd->vdev_spa->spa_checkpoint_info.sci_dspace -= sme->sme_run;
vd->vdev_stat.vs_checkpoint_space -= sme->sme_run;
sdc->sdc_entry_limit--;
return (0);
}
#ifdef ZFS_DEBUG
static void
spa_checkpoint_accounting_verify(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t ckpoint_sm_space_sum = 0;
uint64_t vs_ckpoint_space_sum = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (vd->vdev_checkpoint_sm != NULL) {
ckpoint_sm_space_sum +=
-space_map_allocated(vd->vdev_checkpoint_sm);
vs_ckpoint_space_sum +=
vd->vdev_stat.vs_checkpoint_space;
ASSERT3U(ckpoint_sm_space_sum, ==,
vs_ckpoint_space_sum);
} else {
ASSERT0(vd->vdev_stat.vs_checkpoint_space);
}
}
ASSERT3U(spa->spa_checkpoint_info.sci_dspace, ==, ckpoint_sm_space_sum);
}
#endif
static void
spa_checkpoint_discard_thread_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd = arg;
int error;
/*
* The space map callback is applied only to non-debug entries.
* Because the number of debug entries is less than or equal to the
* number of non-debug entries, we want to ensure that we only
* read what we prefetched from open-context.
*
* Thus, we set the maximum entries that the space map callback
* will be applied to be half the entries that could fit in the
* imposed memory limit.
*
* Note that since this is a conservative estimate we also
* assume the worst case scenario in our computation where each
* entry is two-word.
*/
uint64_t max_entry_limit =
(zfs_spa_discard_memory_limit / (2 * sizeof (uint64_t))) >> 1;
/*
* Iterate from the end of the space map towards the beginning,
* placing its entries on ms_freeing and removing them from the
* space map. The iteration stops if one of the following
* conditions is true:
*
* 1] We reached the beginning of the space map. At this point
* the space map should be completely empty and
* space_map_incremental_destroy should have returned 0.
* The next step would be to free and close the space map
* and remove its entry from its vdev's top zap. This allows
* spa_checkpoint_discard_thread() to move on to the next vdev.
*
* 2] We reached the memory limit (amount of memory used to hold
* space map entries in memory) and space_map_incremental_destroy
* returned EINTR. This means that there are entries remaining
* in the space map that will be cleared in a future invocation
* of this function by spa_checkpoint_discard_thread().
*/
spa_checkpoint_discard_sync_callback_arg_t sdc;
sdc.sdc_vd = vd;
sdc.sdc_txg = tx->tx_txg;
sdc.sdc_entry_limit = max_entry_limit;
uint64_t words_before =
space_map_length(vd->vdev_checkpoint_sm) / sizeof (uint64_t);
error = space_map_incremental_destroy(vd->vdev_checkpoint_sm,
spa_checkpoint_discard_sync_callback, &sdc, tx);
uint64_t words_after =
space_map_length(vd->vdev_checkpoint_sm) / sizeof (uint64_t);
#ifdef ZFS_DEBUG
spa_checkpoint_accounting_verify(vd->vdev_spa);
#endif
zfs_dbgmsg("discarding checkpoint: txg %llu, vdev id %lld, "
"deleted %llu words - %llu words are left",
(u_longlong_t)tx->tx_txg, (longlong_t)vd->vdev_id,
(u_longlong_t)(words_before - words_after),
(u_longlong_t)words_after);
if (error != EINTR) {
if (error != 0) {
zfs_panic_recover("zfs: error %lld was returned "
"while incrementally destroying the checkpoint "
"space map of vdev %llu\n",
(longlong_t)error, vd->vdev_id);
}
ASSERT0(words_after);
ASSERT0(space_map_allocated(vd->vdev_checkpoint_sm));
ASSERT0(space_map_length(vd->vdev_checkpoint_sm));
space_map_free(vd->vdev_checkpoint_sm, tx);
space_map_close(vd->vdev_checkpoint_sm);
vd->vdev_checkpoint_sm = NULL;
VERIFY0(zap_remove(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, tx));
}
}
static boolean_t
spa_checkpoint_discard_is_done(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(!spa_has_checkpoint(spa));
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT));
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (rvd->vdev_child[c]->vdev_checkpoint_sm != NULL)
return (B_FALSE);
ASSERT0(rvd->vdev_child[c]->vdev_stat.vs_checkpoint_space);
}
return (B_TRUE);
}
boolean_t
spa_checkpoint_discard_thread_check(void *arg, zthr_t *zthr)
{
(void) zthr;
spa_t *spa = arg;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (B_FALSE);
if (spa_has_checkpoint(spa))
return (B_FALSE);
return (B_TRUE);
}
void
spa_checkpoint_discard_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
while (vd->vdev_checkpoint_sm != NULL) {
space_map_t *checkpoint_sm = vd->vdev_checkpoint_sm;
int numbufs;
dmu_buf_t **dbp;
if (zthr_iscancelled(zthr))
return;
ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
uint64_t size = MIN(space_map_length(checkpoint_sm),
zfs_spa_discard_memory_limit);
uint64_t offset =
space_map_length(checkpoint_sm) - size;
/*
* Ensure that the part of the space map that will
* be destroyed by the synctask is prefetched into
* memory before the synctask runs.
*/
int error = dmu_buf_hold_array_by_bonus(
checkpoint_sm->sm_dbuf, offset, size,
B_TRUE, FTAG, &numbufs, &dbp);
if (error != 0) {
zfs_panic_recover("zfs: error %d was returned "
"while prefetching checkpoint space map "
"entries of vdev %llu\n",
error, vd->vdev_id);
}
VERIFY0(dsl_sync_task(spa->spa_name, NULL,
spa_checkpoint_discard_thread_sync, vd,
0, ZFS_SPACE_CHECK_NONE));
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
}
VERIFY(spa_checkpoint_discard_is_done(spa));
VERIFY0(spa->spa_checkpoint_info.sci_dspace);
VERIFY0(dsl_sync_task(spa->spa_name, NULL,
spa_checkpoint_discard_complete_sync, spa,
0, ZFS_SPACE_CHECK_NONE));
}
static int
spa_checkpoint_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ENOTSUP));
if (!spa_top_vdevs_spacemap_addressable(spa))
return (SET_ERROR(ZFS_ERR_VDEV_TOO_BIG));
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(ZFS_ERR_DEVRM_IN_PROGRESS));
if (spa->spa_raidz_expand != NULL)
return (SET_ERROR(ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));
if (spa->spa_checkpoint_txg != 0)
return (SET_ERROR(ZFS_ERR_CHECKPOINT_EXISTS));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_DISCARDING_CHECKPOINT));
return (0);
}
static void
spa_checkpoint_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
uberblock_t checkpoint = spa->spa_ubsync;
/*
* At this point, there should not be a checkpoint in the MOS.
*/
ASSERT3U(zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT), ==, ENOENT);
ASSERT0(spa->spa_checkpoint_info.sci_timestamp);
ASSERT0(spa->spa_checkpoint_info.sci_dspace);
/*
* Since the checkpointed uberblock is the one that just got synced
* (we use spa_ubsync), its txg must be equal to the txg number of
* the txg we are syncing, minus 1.
*/
ASSERT3U(checkpoint.ub_txg, ==, spa->spa_syncing_txg - 1);
/*
* Once the checkpoint is in place, we need to ensure that none of
* its blocks will be marked for reuse after it has been freed.
* When there is a checkpoint and a block is freed, we compare its
* birth txg to the txg of the checkpointed uberblock to see if the
* block is part of the checkpoint or not. Therefore, we have to set
* spa_checkpoint_txg before any frees happen in this txg (which is
* why this is done as an early_synctask as explained in the comment
* in spa_checkpoint()).
*/
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
checkpoint.ub_checkpoint_txg = checkpoint.ub_txg;
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT,
sizeof (uint64_t), sizeof (uberblock_t) / sizeof (uint64_t),
&checkpoint, tx));
/*
* Increment the feature refcount and thus activate the feature.
* Note that the feature will be deactivated when we've
* completely discarded all checkpointed state (both vdev
* space maps and uberblock).
*/
spa_feature_incr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_history_log_internal(spa, "spa checkpoint", tx,
"checkpointed uberblock txg=%llu", (u_longlong_t)checkpoint.ub_txg);
}
/*
* Create a checkpoint for the pool.
*/
int
spa_checkpoint(const char *pool)
{
int error;
spa_t *spa;
error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
mutex_enter(&spa->spa_vdev_top_lock);
/*
* Wait for current syncing txg to finish so the latest synced
* uberblock (spa_ubsync) has all the changes that we expect
* to see if we were to revert later to the checkpoint. In other
* words we want the checkpointed uberblock to include/reference
* all the changes that were pending at the time that we issued
* the checkpoint command.
*/
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* As the checkpointed uberblock references blocks from the previous
* txg (spa_ubsync) we want to ensure that we are not freeing any of
* these blocks in the same txg that the following synctask will
* run. Thus, we run it as an early synctask, so the dirty changes
* that are synced to disk afterwards during zios and other synctasks
* do not reuse checkpointed blocks.
*/
error = dsl_early_sync_task(pool, spa_checkpoint_check,
spa_checkpoint_sync, NULL, 0, ZFS_SPACE_CHECK_NORMAL);
mutex_exit(&spa->spa_vdev_top_lock);
spa_close(spa, FTAG);
return (error);
}
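/*
 * Illustrative sketch, not part of this change: a hypothetical caller of
 * spa_checkpoint() showing how the errors returned by
 * spa_checkpoint_check() above surface to whoever issues the checkpoint
 * request. The helper name (toy_checkpoint_pool) and the example-only
 * guard are assumptions for illustration.
 */
#if 0	/* example only, never compiled */
static int
toy_checkpoint_pool(const char *poolname)
{
	int err = spa_checkpoint(poolname);

	switch (err) {
	case 0:
		return (0);		/* checkpoint synctask completed */
	case ENOTSUP:
		/* SPA_FEATURE_POOL_CHECKPOINT is not enabled on the pool. */
		return (err);
	case ZFS_ERR_CHECKPOINT_EXISTS:
		/* A checkpoint already exists; discard it first. */
		return (err);
	case ZFS_ERR_DISCARDING_CHECKPOINT:
		/* A previous checkpoint is still being discarded. */
		return (err);
	default:
		return (err);
	}
}
#endif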
static int
spa_checkpoint_discard_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
if (spa->spa_checkpoint_txg == 0)
return (SET_ERROR(ZFS_ERR_DISCARDING_CHECKPOINT));
VERIFY0(zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT));
return (0);
}
static void
spa_checkpoint_discard_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
VERIFY0(zap_remove(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, tx));
spa->spa_checkpoint_txg = 0;
zthr_wakeup(spa->spa_checkpoint_discard_zthr);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"started discarding checkpointed state from the pool");
}
/*
* Discard the checkpoint from a pool.
*/
int
spa_checkpoint_discard(const char *pool)
{
/*
* Similarly to spa_checkpoint(), we want our synctask to run
* before any pending dirty data are written to disk so they
* won't end up in the checkpoint's data structures (e.g.
* ms_checkpointing and vdev_checkpoint_sm) and re-create any
* space maps that the discarding open-context thread has
* deleted.
* [see spa_checkpoint_discard_sync and spa_checkpoint_discard_thread]
*/
return (dsl_early_sync_task(pool, spa_checkpoint_discard_check,
spa_checkpoint_discard_sync, NULL, 0,
ZFS_SPACE_CHECK_DISCARD_CHECKPOINT));
}
EXPORT_SYMBOL(spa_checkpoint_get_stats);
EXPORT_SYMBOL(spa_checkpoint_discard_thread);
EXPORT_SYMBOL(spa_checkpoint_discard_thread_check);
ZFS_MODULE_PARAM(zfs_spa, zfs_spa_, discard_memory_limit, U64, ZMOD_RW,
"Limit for memory used in prefetching the checkpoint space map done "
"on each vdev while discarding the checkpoint");
diff --git a/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c b/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
index a95152608578..5eb4d043be41 100644
--- a/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
+++ b/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
@@ -1,1406 +1,1406 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018, 2019 by Delphix. All rights reserved.
*/
#include <sys/dmu_objset.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>
/*
* Log Space Maps
*
* Log space maps are an optimization in ZFS metadata allocations for pools
* whose workloads are primarily random-writes. Random-write workloads are also
* typically random-free, meaning that they are freeing from locations scattered
* throughout the pool. This means that each TXG we will have to append some
* FREE records to almost every metaslab. With log space maps, we hold their
* changes in memory and log them altogether in one pool-wide space map on-disk
* for persistence. As more blocks are accumulated in the log space maps and
* more unflushed changes are accounted in memory, we flush a selected group
* of metaslabs every TXG to relieve memory pressure and potential overheads
* when loading the pool. Flushing a metaslab to disk relieves memory as we
* flush any unflushed changes from memory to disk (i.e. the metaslab's space
* map) and saves import time by making old log space maps obsolete and
* eventually destroying them. [A log space map is said to be obsolete when all
* its entries have made it to their corresponding metaslab space maps].
*
* == On disk data structures used ==
*
* - The pool has a new feature flag and a new entry in the MOS. The feature
* is activated when we create the first log space map and remains active
* for the lifetime of the pool. The new entry in the MOS Directory [refer
* to DMU_POOL_LOG_SPACEMAP_ZAP] is populated with a ZAP whose key-value
* pairs are of the form <key: txg, value: log space map object for that txg>.
* This entry is our on-disk reference of the log space maps that exist in
* the pool for each TXG and it is used during import to load all the
* metaslab unflushed changes in memory. To see how this structure is first
* created and later populated refer to spa_generate_syncing_log_sm(). To see
* how it is used during import time refer to spa_ld_log_sm_metadata().
*
* - Each vdev has a new entry in its vdev_top_zap (see field
* VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS) which holds the msp_unflushed_txg of
* each metaslab in this vdev. This field is the on-disk counterpart of the
* in-memory field ms_unflushed_txg which tells us from which TXG and onwards
* the metaslab hasn't had its changes flushed. During import, we use this
* to ignore any entries in the space map log that are for this metaslab but
* from a TXG before msp_unflushed_txg. At that point, we also populate its
* in-memory counterpart and from there both fields are updated every time
* we flush that metaslab.
*
* - A space map is created every TXG and, during that TXG, it is used to log
* all incoming changes (the log space map). When created, the log space map
* is referenced in memory by spa_syncing_log_sm and its object ID is inserted
* into the space map ZAP mentioned above. The log space map is closed at the
* end of the TXG and will be destroyed when it becomes fully obsolete. We
* know when a log space map has become obsolete by looking at the oldest
* (and smallest) ms_unflushed_txg in the pool. If the value of that is bigger
* than the log space map's TXG, then it means that there is no metaslab that
* doesn't have the changes from that log and we can therefore destroy it.
* [see spa_cleanup_old_sm_logs()].
*
* == Important in-memory structures ==
*
* - The per-spa field spa_metaslabs_by_flushed sorts all the metaslabs in
* the pool by their ms_unflushed_txg field. It is primarily used for three
* reasons. First of all, it is used during flushing where we try to flush
* metaslabs in order from the oldest flushed to the most recently flushed
* every TXG. Secondly, it helps us to look up the ms_unflushed_txg of the
* oldest flushed metaslab to distinguish which log space maps have become
* obsolete and which ones are still relevant. Finally it tells us which
* metaslabs have unflushed changes in a pool where this feature was just
* enabled, as we don't immediately add all of the pool's metaslabs but we
* add them over time as they go through metaslab_sync(). The reason that
* we do that is to ease these pools into the behavior of the flushing
* algorithm (described later on).
*
* - The per-spa field spa_sm_logs_by_txg can be thought of as the in-memory
* counterpart of the space map ZAP mentioned above. It's an AVL tree whose
* nodes represent the log space maps in the pool. This in-memory
* representation of log space maps in the pool sorts the log space maps by
* the TXG that they were created (which is also the TXG of their unflushed
* changes). It also contains the following extra information for each
* space map:
* [1] The number of metaslabs that were last flushed on that TXG. This is
* important because if that counter is zero and this is the oldest
* log then it means that it is also obsolete.
* [2] The number of blocks of that space map. This field is used by the
* block heuristic of our flushing algorithm (described later on).
* It represents how many blocks of metadata changes ZFS had to write
* to disk for that TXG.
*
* - The per-spa field spa_log_summary is a list of entries that summarizes
* the metaslab and block counts of all the nodes of the spa_sm_logs_by_txg
* AVL tree mentioned above. The reason this exists is that our flushing
* algorithm (described later) tries to estimate how many metaslabs to flush
* in each TXG by iterating over all the log space maps and looking at their
* block counts. Summarizing that information means that we don't have to
* iterate through each space map, minimizing the runtime overhead of the
* flushing algorithm which would be induced in syncing context. In terms of
* implementation the log summary is used as a queue:
* * we modify or pop entries from its head when we flush metaslabs
* * we modify or append entries to its tail when we sync changes.
*
* - Each metaslab has two new range trees that hold its unflushed changes,
* ms_unflushed_allocs and ms_unflushed_frees. These are always disjoint.
*
* == Flushing algorithm ==
*
* The decision of how many metaslabs to flush on a given TXG is guided by
* two heuristics:
*
* [1] The memory heuristic -
* We keep track of the memory used by the unflushed trees from all the
* metaslabs [see sus_memused of spa_unflushed_stats] and we ensure that it
* stays below a certain threshold which is determined by an arbitrary hard
* limit and an arbitrary percentage of the system's memory [see
* spa_log_exceeds_memlimit()]. When we see that the memory usage of the
* unflushed changes is passing that threshold, we flush metaslabs, which
* empties their unflushed range trees, reducing the memory used.
*
* [2] The block heuristic -
* We try to keep the total number of blocks in the log space maps in check
* so the log doesn't grow indefinitely and we don't induce a lot of overhead
* when loading the pool. At the same time we don't want to flush a lot of
* metaslabs too often as this would defeat the purpose of the log space map.
* As a result we set a limit on the number of blocks that we think is
* acceptable for the log space maps to have, and try not to cross it.
* [see sus_blocklimit from spa_unflushed_stats].
*
* In order to stay below the block limit every TXG we have to estimate how
* many metaslabs we need to flush based on the current rate of incoming blocks
* and our history of log space map blocks. The main idea here is to answer
* the question of how many metaslabs we need to flush in order to get rid of
* at least X log space map blocks. We can answer this question
* by iterating backwards from the oldest log space map to the newest one
* and looking at their metaslab and block counts. At this point the log summary
* mentioned above comes in handy as it reduces the amount of things that we have
* to iterate (even though it may reduce the preciseness of our estimates due
* to its aggregation of data). So with that in mind, we project the incoming
* rate of the current TXG into the future and attempt to approximate how many
* metaslabs we would need to flush from now on in order to avoid exceeding our
* block limit in different points in the future (granted that we would keep
* flushing the same number of metaslabs for every TXG). Then we take the
* maximum number from all these estimates to be on the safe side. For the
* exact implementation details of the algorithm refer to
* spa_estimate_metaslabs_to_flush.
*/
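/*
 * Illustrative sketch, not part of this change: a simplified model of the
 * block heuristic described above, operating on plain arrays instead of
 * the spa_t summary structures and ignoring the dirty-TXG limit
 * (zfs_unflushed_log_txg_max) for brevity. The names toy_summary_row_t
 * and toy_estimate_flushes are assumptions for illustration only; the
 * real logic lives in spa_estimate_metaslabs_to_flush() below.
 */
#if 0	/* example only, never compiled */
typedef struct toy_summary_row {
	uint64_t blkcount;	/* log blocks accounted by this row */
	uint64_t msdcount;	/* dirty metaslabs accounted by this row */
} toy_summary_row_t;

static uint64_t
toy_estimate_flushes(const toy_summary_row_t *rows, int nrows,
    uint64_t blocklimit, uint64_t nblocks, uint64_t incoming)
{
	/* Room left before the limit, after this TXG's projected blocks. */
	int64_t available = (int64_t)(blocklimit - nblocks - incoming);
	uint64_t txgs_in_future = 1;
	uint64_t total_flushes = 0;
	uint64_t max_flushes_pertxg = 1;

	/* With no incoming blocks we never exceed the limit in this model. */
	if (incoming == 0)
		return (max_flushes_pertxg);

	for (int i = 0; i < nrows; i++) {
		/* Skip TXGs while the projected incoming blocks still fit. */
		if (available >= 0) {
			uint64_t skip = (available / incoming) + 1;
			available -= (int64_t)(skip * incoming);
			txgs_in_future += skip;
		}
		/* Freeing this row's blocks means flushing its metaslabs. */
		available += rows[i].blkcount;
		total_flushes += rows[i].msdcount;
		/* Track the worst-case per-TXG flush rate seen so far. */
		max_flushes_pertxg = MAX(max_flushes_pertxg,
		    DIV_ROUND_UP(total_flushes, txgs_in_future));
	}
	return (max_flushes_pertxg);
}
#endif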
/*
* This is used as the block size for the space maps used for the
* log space map feature. These space maps benefit from a bigger
* block size as we expect to be writing a lot of data to them at
* once.
*/
static const unsigned long zfs_log_sm_blksz = 1ULL << 17;
/*
* Percentage of the overall system's memory that ZFS allows to be
* used for unflushed changes (e.g. the sum of size of all the nodes
* in the unflushed trees).
*
* Note that this value is calculated over 1000000 for finer granularity
* (thus the _ppm suffix; reads as "parts per million"). As an example,
* the default of 1000 allows 0.1% of memory to be used.
*/
static uint64_t zfs_unflushed_max_mem_ppm = 1000;
/*
* Specific hard-limit in memory that ZFS allows to be used for
* unflushed changes.
*/
static uint64_t zfs_unflushed_max_mem_amt = 1ULL << 30;
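/*
 * Illustrative worked example (assumed numbers, not part of this change):
 * with the defaults above, a system with 64 GiB of RAM allows
 * MIN(zfs_unflushed_max_mem_amt, 64 GiB * 1000 / 1000000) =
 * MIN(1 GiB, ~65 MiB) = ~65 MiB of unflushed changes before the memory
 * heuristic starts forcing flushes; see spa_log_exceeds_memlimit() below.
 */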
/*
* The following tunable determines the number of blocks that can be used for
* the log space maps. It is expressed as a percentage of the total number of
* metaslabs in the pool (i.e. the default of 400 means that the number of log
* blocks is capped at 4 times the number of metaslabs).
*
* This value exists to tune our flushing algorithm, with higher values
* flushing metaslabs less often (doing less I/Os) per TXG versus lower values
* flushing metaslabs more aggressively with the upside of saving overheads
* when loading the pool. Another factor in this tradeoff is that flushing
* less often can potentially lead to better utilization of the metaslab space
* map's block size as we accumulate more changes per flush.
*
* Given that this tunable indirectly controls the flush rate (metaslabs
* flushed per txg), making it a percentage of the number of metaslabs
* in the pool makes sense here.
*
* As a rule of thumb we default this tunable to 400% based on the following:
*
* 1] Assuming a constant flush rate and a constant incoming rate of log blocks
* it is reasonable to expect that the amount of obsolete entries changes
* linearly from txg to txg (e.g. the oldest log should have the most
* obsolete entries, and the most recent one the least). With this we could
* say that, at any given time, about half of the entries in the whole space
* map log are obsolete. Thus for every two entries for a metaslab in the
* log space map, only one of them is valid and actually makes it to the
* metaslab's space map.
* [factor of 2]
* 2] Each entry in the log space map is guaranteed to be two words while
* entries in metaslab space maps are generally single-word.
* [an extra factor of 2 - 400% overall]
* 3] Even if [1] and [2] are slightly less than 2 each, we haven't taken into
* account any consolidation of segments from the log space map to the
* unflushed range trees nor their history (e.g. a segment being allocated,
* then freed, then allocated again means 3 log space map entries but 0
* metaslab space map entries). Depending on the workload, we've seen ~1.8
* non-obsolete log space map entries per metaslab entry, for a total of
* ~600%. Since most of these estimates, though, are workload-dependent, we
* default to 400% to be conservative.
*
* Thus we could say that even in the worst
* case of [1] and [2], the factor should end up being 4.
*
* That said, regardless of the number of metaslabs in the pool we need to
* provide upper and lower bounds for the log block limit.
* [see zfs_unflushed_log_block_{min,max}]
*/
static uint_t zfs_unflushed_log_block_pct = 400;
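/*
 * Illustrative worked example (assumed numbers, not part of this change):
 * spa_log_sm_set_blocklimit() below sums the summary's lse_msdcount and
 * scales it by this percentage, so with the default of 400 a count of
 * 1000 yields 1000 * 400 / 100 = 4000 log blocks, which is then clamped
 * to the [zfs_unflushed_log_block_min, zfs_unflushed_log_block_max] range.
 */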
/*
* If the number of metaslabs is small and our incoming rate is high, we could
* get into a situation where we are flushing all our metaslabs every TXG. Thus
* we always allow at least this many log blocks.
*/
static uint64_t zfs_unflushed_log_block_min = 1000;
/*
* If the log becomes too big, the import time of the pool can take a hit in
* terms of performance. Thus we have a hard limit on the size of the log in
* terms of blocks.
*/
static uint64_t zfs_unflushed_log_block_max = (1ULL << 17);
/*
* We also have a hard limit on the size of the log in terms of dirty TXGs.
*/
static uint64_t zfs_unflushed_log_txg_max = 1000;
/*
* Max # of rows allowed for the log_summary. The tradeoff here is accuracy and
* stability of the flushing algorithm (longer summary) vs its runtime overhead
* (smaller summary is faster to traverse).
*/
static uint64_t zfs_max_logsm_summary_length = 10;
/*
* Tunable that sets the lower bound on the metaslabs to flush every TXG.
*
* Setting this to 0 has no effect since if the pool is idle we won't even be
* creating log space maps and therefore we won't be flushing. On the other
* hand if the pool has any incoming workload our block heuristic will start
* flushing metaslabs anyway.
*
* The point of this tunable is to be used in extreme cases where we really
* want to flush more metaslabs than our adaptable heuristic plans to flush.
*/
static uint64_t zfs_min_metaslabs_to_flush = 1;
/*
* Tunable that specifies how far into the past we want to look when trying to
* estimate the incoming log blocks for the current TXG.
*
* Setting this too high may not only increase runtime but also minimize the
* effect of the incoming rates from the most recent TXGs as we take the
* average over all the blocks that we walk
* [see spa_estimate_incoming_log_blocks].
*/
static uint64_t zfs_max_log_walking = 5;
/*
* This tunable exists solely for testing purposes. It ensures that the log
* spacemaps are not flushed and destroyed during export in order for the
* relevant log spacemap import code paths to be tested (effectively simulating
* a crash).
*/
int zfs_keep_log_spacemaps_at_export = 0;
static uint64_t
spa_estimate_incoming_log_blocks(spa_t *spa)
{
ASSERT3U(spa_sync_pass(spa), ==, 1);
uint64_t steps = 0, sum = 0;
for (spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
sls != NULL && steps < zfs_max_log_walking;
sls = AVL_PREV(&spa->spa_sm_logs_by_txg, sls)) {
if (sls->sls_txg == spa_syncing_txg(spa)) {
/*
* skip the log created in this TXG as this would
* make our estimations inaccurate.
*/
continue;
}
sum += sls->sls_nblocks;
steps++;
}
return ((steps > 0) ? DIV_ROUND_UP(sum, steps) : 0);
}
uint64_t
spa_log_sm_blocklimit(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_blocklimit);
}
void
spa_log_sm_set_blocklimit(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
ASSERT0(spa_log_sm_blocklimit(spa));
return;
}
uint64_t msdcount = 0;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e))
msdcount += e->lse_msdcount;
uint64_t limit = msdcount * zfs_unflushed_log_block_pct / 100;
spa->spa_unflushed_stats.sus_blocklimit = MIN(MAX(limit,
zfs_unflushed_log_block_min), zfs_unflushed_log_block_max);
}
uint64_t
spa_log_sm_nblocks(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_nblocks);
}
/*
* Ensure that the in-memory log space map structures and the summary
* have the same block and metaslab counts.
*/
static void
spa_log_summary_verify_counts(spa_t *spa)
{
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
if ((zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) == 0)
return;
uint64_t ms_in_avl = avl_numnodes(&spa->spa_metaslabs_by_flushed);
uint64_t ms_in_summary = 0, blk_in_summary = 0;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e)) {
ms_in_summary += e->lse_mscount;
blk_in_summary += e->lse_blkcount;
}
uint64_t ms_in_logs = 0, blk_in_logs = 0;
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
ms_in_logs += sls->sls_mscount;
blk_in_logs += sls->sls_nblocks;
}
VERIFY3U(ms_in_logs, ==, ms_in_summary);
VERIFY3U(ms_in_logs, ==, ms_in_avl);
VERIFY3U(blk_in_logs, ==, blk_in_summary);
VERIFY3U(blk_in_logs, ==, spa_log_sm_nblocks(spa));
}
static boolean_t
summary_entry_is_full(spa_t *spa, log_summary_entry_t *e, uint64_t txg)
{
if (e->lse_end == txg)
return (0);
if (e->lse_txgcount >= DIV_ROUND_UP(zfs_unflushed_log_txg_max,
zfs_max_logsm_summary_length))
return (1);
uint64_t blocks_per_row = MAX(1,
DIV_ROUND_UP(spa_log_sm_blocklimit(spa),
zfs_max_logsm_summary_length));
return (blocks_per_row <= e->lse_blkcount);
}
/*
* Update the log summary information to reflect the fact that a metaslab
* was flushed or destroyed (e.g. due to device removal or pool export/destroy).
*
* We typically flush the oldest flushed metaslab so the first (and oldest)
* entry of the summary is updated. However if that metaslab is getting loaded
* we may flush the second oldest one which may be part of an entry later in
* the summary. Moreover, if we call into this function from metaslab_fini()
* the metaslabs probably won't be ordered by ms_unflushed_txg. Thus we ask
* for a txg as an argument so we can locate the appropriate summary entry for
* the metaslab.
*/
void
spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg, boolean_t dirty)
{
/*
* We don't track summary data for read-only pools and this function
* can be called from metaslab_fini(). In that case return immediately.
*/
if (!spa_writeable(spa))
return;
log_summary_entry_t *target = NULL;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_next(&spa->spa_log_summary, e)) {
if (e->lse_start > txg)
break;
target = e;
}
if (target == NULL || target->lse_mscount == 0) {
/*
* We didn't find a summary entry for this metaslab. We must be
* at the teardown of a spa_load() attempt that got an error
* while reading the log space maps.
*/
VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
return;
}
target->lse_mscount--;
if (dirty)
target->lse_msdcount--;
}
/*
* Update the log summary information to reflect the fact that we destroyed
* old log space maps. Since we can only destroy the oldest log space maps,
* we decrement the block count of the oldest summary entry and potentially
* destroy it when that count hits 0.
*
* This function is called after a metaslab is flushed and typically that
* metaslab is the oldest flushed, which means that this function will
* typically decrement the block count of the first entry of the summary and
* potentially free it if the block count gets to zero (its metaslab count
* should be zero too at that point).
*
* There are certain scenarios though that don't work exactly like that so we
* need to account for them:
*
* Scenario [1]: It is possible that after we flushed the oldest flushed
* metaslab and we destroyed the oldest log space map, more recent logs had 0
* metaslabs pointing to them so we got rid of them too. This can happen due
* to metaslabs being destroyed through device removal, or because the oldest
* flushed metaslab was loading but we kept flushing more recently flushed
* metaslabs due to the memory pressure of unflushed changes. Because of that,
* we always iterate from the beginning of the summary and, if blocks_gone is
* bigger than the block_count of the current entry, we free that entry (we
* expect its metaslab count to be zero), decrement blocks_gone, and move on
* to the next entry, repeating this procedure until blocks_gone gets
* decremented to 0. Doing this also works for the typical case mentioned
* above.
*
* Scenario [2]: The oldest flushed metaslab isn't necessarily accounted by
* the first (and oldest) entry in the summary. If the first few entries of
* the summary were only accounting metaslabs from a device that was just
* removed, then the current oldest flushed metaslab could be accounted by an
* entry somewhere in the middle of the summary. Moreover flushing that
* metaslab will destroy all the log space maps older than its ms_unflushed_txg
* because they became obsolete after the removal. Thus, iterating as we did
* for scenario [1] works out for this case too.
*
* Scenario [3]: At times we decide to flush all the metaslabs in the pool
* in one TXG (either because we are exporting the pool or because our flushing
* heuristics decided to do so). When that happens all the log space maps get
* destroyed except the one created for the current TXG which doesn't have
* any log blocks yet. As log space maps get destroyed with every metaslab that
* we flush, entries in the summary are also destroyed. This brings a weird
* corner-case when we flush the last metaslab and the log space map of the
* current TXG is in the same summary entry with other log space maps that
* are older. When that happens we are eventually left with this one last
* summary entry whose blocks are gone (blocks_gone equals the entry's block
* count) but its metaslab count is non-zero (because it accounts all the
* metaslabs in the pool as they all got flushed). Under this scenario we can't
* free this last summary entry as it's referencing all the metaslabs in the
* pool and its block count will get incremented at the end of this sync (when
* we close the syncing log space map). Thus we just decrement its current
* block count and leave it alone. In the case that the pool gets exported,
* its metaslab count will be decremented over time as we call metaslab_fini()
* for all the metaslabs in the pool and the entry will be freed at
* spa_unload_log_sm_metadata().
*/
void
spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
{
log_summary_entry_t *e = list_head(&spa->spa_log_summary);
ASSERT3P(e, !=, NULL);
if (e->lse_txgcount > 0)
e->lse_txgcount--;
for (; e != NULL; e = list_head(&spa->spa_log_summary)) {
if (e->lse_blkcount > blocks_gone) {
e->lse_blkcount -= blocks_gone;
blocks_gone = 0;
break;
} else if (e->lse_mscount == 0) {
/* remove obsolete entry */
blocks_gone -= e->lse_blkcount;
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
} else {
/* Verify that this is scenario [3] mentioned above. */
VERIFY3U(blocks_gone, ==, e->lse_blkcount);
/*
* Assert that this is scenario [3] further by ensuring
* that this is the only entry in the summary.
*/
VERIFY3P(e, ==, list_tail(&spa->spa_log_summary));
ASSERT3P(e, ==, list_head(&spa->spa_log_summary));
blocks_gone = e->lse_blkcount = 0;
break;
}
}
/*
* Ensure that there is no way we are trying to remove more blocks
* than the # of blocks in the summary.
*/
ASSERT0(blocks_gone);
}
void
spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg)
{
spa_log_sm_t target = { .sls_txg = txg };
spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
&target, NULL);
if (sls == NULL) {
/*
* We must be at the teardown of a spa_load() attempt that
* got an error while reading the log space maps.
*/
VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
return;
}
ASSERT(sls->sls_mscount > 0);
sls->sls_mscount--;
}
void
spa_log_sm_increment_current_mscount(spa_t *spa)
{
spa_log_sm_t *last_sls = avl_last(&spa->spa_sm_logs_by_txg);
ASSERT3U(last_sls->sls_txg, ==, spa_syncing_txg(spa));
last_sls->sls_mscount++;
}
static void
summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed,
uint64_t metaslabs_dirty, uint64_t nblocks)
{
log_summary_entry_t *e = list_tail(&spa->spa_log_summary);
if (e == NULL || summary_entry_is_full(spa, e, txg)) {
e = kmem_zalloc(sizeof (log_summary_entry_t), KM_SLEEP);
e->lse_start = e->lse_end = txg;
e->lse_txgcount = 1;
list_insert_tail(&spa->spa_log_summary, e);
}
ASSERT3U(e->lse_start, <=, txg);
if (e->lse_end < txg) {
e->lse_end = txg;
e->lse_txgcount++;
}
e->lse_mscount += metaslabs_flushed;
e->lse_msdcount += metaslabs_dirty;
e->lse_blkcount += nblocks;
}
static void
spa_log_summary_add_incoming_blocks(spa_t *spa, uint64_t nblocks)
{
summary_add_data(spa, spa_syncing_txg(spa), 0, 0, nblocks);
}
void
spa_log_summary_add_flushed_metaslab(spa_t *spa, boolean_t dirty)
{
summary_add_data(spa, spa_syncing_txg(spa), 1, dirty ? 1 : 0, 0);
}
void
spa_log_summary_dirty_flushed_metaslab(spa_t *spa, uint64_t txg)
{
log_summary_entry_t *target = NULL;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_next(&spa->spa_log_summary, e)) {
if (e->lse_start > txg)
break;
target = e;
}
ASSERT3P(target, !=, NULL);
ASSERT3U(target->lse_mscount, !=, 0);
target->lse_msdcount++;
}
/*
* This function attempts to estimate how many metaslabs we should
* flush to satisfy our block heuristic for the log spacemap
* for the upcoming TXGs.
*
* Specifically, it first tries to estimate the number of incoming
* blocks in this TXG. Then by projecting that incoming rate to
* future TXGs and using the log summary, it figures out how many
* flushes we would need to do for future TXGs individually to
* stay below our block limit and returns the maximum number of
* flushes from those estimates.
*/
static uint64_t
spa_estimate_metaslabs_to_flush(spa_t *spa)
{
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_log_sm_blocklimit(spa) != 0);
/*
* This variable contains the incoming rate that will be projected
* and used for our flushing estimates in the future.
*/
uint64_t incoming = spa_estimate_incoming_log_blocks(spa);
/*
* At any point in time this variable tells us how many
* TXGs into the future we are, so we can make our estimations.
*/
uint64_t txgs_in_future = 1;
/*
* This variable tells us how much room we have until we hit
* our limit. When it goes negative, it means that we've exceeded
* our limit and we need to flush.
*
* Note that since we start at the first TXG in the future (i.e.
* txgs_in_future starts from 1) we already decrement this
* variable by the incoming rate.
*/
int64_t available_blocks =
spa_log_sm_blocklimit(spa) - spa_log_sm_nblocks(spa) - incoming;
int64_t available_txgs = zfs_unflushed_log_txg_max;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e))
available_txgs -= e->lse_txgcount;
/*
* This variable tells us the total number of flushes needed to
* keep the log size within the limit when we reach txgs_in_future.
*/
uint64_t total_flushes = 0;
/* Holds the current maximum of our estimates so far. */
uint64_t max_flushes_pertxg = zfs_min_metaslabs_to_flush;
/*
* For our estimations we only look as far in the future
* as the summary allows us.
*/
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e)) {
/*
* If there is still room before we exceed our limit
* then keep skipping TXGs accumulating more blocks
* based on the incoming rate until we exceed it.
*/
if (available_blocks >= 0 && available_txgs >= 0) {
uint64_t skip_txgs = (incoming == 0) ?
available_txgs + 1 : MIN(available_txgs + 1,
(available_blocks / incoming) + 1);
available_blocks -= (skip_txgs * incoming);
available_txgs -= skip_txgs;
txgs_in_future += skip_txgs;
ASSERT3S(available_blocks, >=, -incoming);
ASSERT3S(available_txgs, >=, -1);
}
/*
* At this point we're far enough into the future where
* the limit was just exceeded and we flush metaslabs
* based on the current entry in the summary, updating
* our available_blocks.
*/
ASSERT(available_blocks < 0 || available_txgs < 0);
available_blocks += e->lse_blkcount;
available_txgs += e->lse_txgcount;
total_flushes += e->lse_msdcount;
/*
* Keep the running maximum of the total_flushes that
* we've done so far over the number of TXGs in the
* future that we are. The idea here is to estimate
* the average number of flushes that we should do
* every TXG so that when we are that many TXGs in the
* future we stay under the limit.
*/
max_flushes_pertxg = MAX(max_flushes_pertxg,
DIV_ROUND_UP(total_flushes, txgs_in_future));
}
return (max_flushes_pertxg);
}
uint64_t
spa_log_sm_memused(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_memused);
}
static boolean_t
spa_log_exceeds_memlimit(spa_t *spa)
{
if (spa_log_sm_memused(spa) > zfs_unflushed_max_mem_amt)
return (B_TRUE);
uint64_t system_mem_allowed = ((physmem * PAGESIZE) *
zfs_unflushed_max_mem_ppm) / 1000000;
if (spa_log_sm_memused(spa) > system_mem_allowed)
return (B_TRUE);
return (B_FALSE);
}
boolean_t
spa_flush_all_logs_requested(spa_t *spa)
{
return (spa->spa_log_flushall_txg != 0);
}
void
spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
if (spa_sync_pass(spa) != 1)
return;
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
/*
* If we don't have any metaslabs with unflushed changes
* return immediately.
*/
if (avl_numnodes(&spa->spa_metaslabs_by_flushed) == 0)
return;
/*
* During SPA export we leave a few empty TXGs to go by [see
* spa_final_dirty_txg() to understand why]. For this specific
* case, it is important to not flush any metaslabs as that
* would dirty this TXG.
*
* That said, during one of these dirty TXGs that is less than or
* equal to spa_final_dirty_txg(), spa_unload() will request that
* we try to flush all the metaslabs for that TXG before
* exporting the pool, thus we ensure that we didn't get a
* request of flushing everything before we attempt to return
* immediately.
*/
if (BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg &&
!dmu_objset_is_dirty(spa_meta_objset(spa), txg) &&
!spa_flush_all_logs_requested(spa))
return;
/*
* We need to generate a log space map before flushing because this
* will set up the in-memory data (i.e. node in spa_sm_logs_by_txg)
* for this TXG's flushed metaslab count (aka sls_mscount which is
* manipulated in many ways down the metaslab_flush() codepath).
*
* That is not to say that we may generate a log space map when we
* don't need it. If we are flushing metaslabs, that means that we
* were going to write changes to disk anyway, so even if we were
* not flushing, a log space map would have been created anyway in
* metaslab_sync().
*/
spa_generate_syncing_log_sm(spa, tx);
/*
* This variable tells us how many metaslabs we want to flush based
* on the block-heuristic of our flushing algorithm (see block comment
* of log space map feature). We also decrement this as we flush
* metaslabs and attempt to destroy old log space maps.
*/
uint64_t want_to_flush;
if (spa_flush_all_logs_requested(spa)) {
ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
want_to_flush = UINT64_MAX;
} else {
want_to_flush = spa_estimate_metaslabs_to_flush(spa);
}
/* Used purely for verification purposes */
uint64_t visited = 0;
/*
* Ideally we would iterate through spa_metaslabs_by_flushed
* using only one variable (curr). We can't do that because
* metaslab_flush() mutates the position of curr in the AVL when
* it flushes that metaslab by moving it to the end of the tree.
* Thus we always keep track of the original next node of the
* current node (curr) in another variable (next).
*/
metaslab_t *next = NULL;
for (metaslab_t *curr = avl_first(&spa->spa_metaslabs_by_flushed);
curr != NULL; curr = next) {
next = AVL_NEXT(&spa->spa_metaslabs_by_flushed, curr);
/*
* If this metaslab has been flushed this txg then we've done
* a full circle over the metaslabs.
*/
if (metaslab_unflushed_txg(curr) == txg)
break;
/*
* If we are done flushing for the block heuristic and the
* unflushed changes don't exceed the memory limit just stop.
*/
if (want_to_flush == 0 && !spa_log_exceeds_memlimit(spa))
break;
if (metaslab_unflushed_dirty(curr)) {
mutex_enter(&curr->ms_sync_lock);
mutex_enter(&curr->ms_lock);
metaslab_flush(curr, tx);
mutex_exit(&curr->ms_lock);
mutex_exit(&curr->ms_sync_lock);
if (want_to_flush > 0)
want_to_flush--;
} else
metaslab_unflushed_bump(curr, tx, B_FALSE);
visited++;
}
ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=, visited);
spa_log_sm_set_blocklimit(spa);
}
/*
* Close the log space map for this TXG and update the block counts
* for the log's in-memory structure and the summary.
*/
void
spa_sync_close_syncing_log_sm(spa_t *spa)
{
if (spa_syncing_log_sm(spa) == NULL)
return;
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
ASSERT3U(sls->sls_txg, ==, spa_syncing_txg(spa));
sls->sls_nblocks = space_map_nblocks(spa_syncing_log_sm(spa));
spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
/*
* Note that we can't assert that sls_mscount is not 0,
* because there is the case where the first metaslab
* in spa_metaslabs_by_flushed is loading and we were
* not able to flush any metaslabs in the current TXG.
*/
ASSERT(sls->sls_nblocks != 0);
spa_log_summary_add_incoming_blocks(spa, sls->sls_nblocks);
spa_log_summary_verify_counts(spa);
space_map_close(spa->spa_syncing_log_sm);
spa->spa_syncing_log_sm = NULL;
/*
* At this point we have tried to flush as many metaslabs as we
* could, as the pool is getting exported. Reset the "flush all"
* request so the last few TXGs before closing the pool can be empty
* (e.g. not dirty).
*/
if (spa_flush_all_logs_requested(spa)) {
ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
spa->spa_log_flushall_txg = 0;
}
}
void
spa_cleanup_old_sm_logs(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(spa);
uint64_t spacemap_zap;
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
return;
}
VERIFY0(error);
metaslab_t *oldest = avl_first(&spa->spa_metaslabs_by_flushed);
uint64_t oldest_flushed_txg = metaslab_unflushed_txg(oldest);
/* Free all log space maps older than the oldest_flushed_txg. */
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls && sls->sls_txg < oldest_flushed_txg;
sls = avl_first(&spa->spa_sm_logs_by_txg)) {
ASSERT0(sls->sls_mscount);
avl_remove(&spa->spa_sm_logs_by_txg, sls);
space_map_free_obj(mos, sls->sls_sm_obj, tx);
VERIFY0(zap_remove_int(mos, spacemap_zap, sls->sls_txg, tx));
spa_log_summary_decrement_blkcount(spa, sls->sls_nblocks);
spa->spa_unflushed_stats.sus_nblocks -= sls->sls_nblocks;
kmem_free(sls, sizeof (spa_log_sm_t));
}
}
static spa_log_sm_t *
spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg)
{
spa_log_sm_t *sls = kmem_zalloc(sizeof (*sls), KM_SLEEP);
sls->sls_sm_obj = sm_obj;
sls->sls_txg = txg;
return (sls);
}
void
spa_generate_syncing_log_sm(spa_t *spa, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
objset_t *mos = spa_meta_objset(spa);
if (spa_syncing_log_sm(spa) != NULL)
return;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
uint64_t spacemap_zap;
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
error = 0;
spacemap_zap = zap_create(mos,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1,
&spacemap_zap, tx));
spa_feature_incr(spa, SPA_FEATURE_LOG_SPACEMAP, tx);
}
VERIFY0(error);
uint64_t sm_obj;
ASSERT3U(zap_lookup_int_key(mos, spacemap_zap, txg, &sm_obj),
==, ENOENT);
sm_obj = space_map_alloc(mos, zfs_log_sm_blksz, tx);
VERIFY0(zap_add_int_key(mos, spacemap_zap, txg, sm_obj, tx));
avl_add(&spa->spa_sm_logs_by_txg, spa_log_sm_alloc(sm_obj, txg));
/*
* We pass UINT64_MAX as the space map's representation size
* and SPA_MINBLOCKSHIFT as the shift, to make the space map
* accept any sort of segment since there's no real advantage
* to being more restrictive (given that we're already going
* to be using 2-word entries).
*/
VERIFY0(space_map_open(&spa->spa_syncing_log_sm, mos, sm_obj,
0, UINT64_MAX, SPA_MINBLOCKSHIFT));
spa_log_sm_set_blocklimit(spa);
}
/*
* Find all the log space maps stored in the space map ZAP and sort
* them by their TXG in spa_sm_logs_by_txg.
*/
static int
spa_ld_log_sm_metadata(spa_t *spa)
{
int error;
uint64_t spacemap_zap;
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
error = zap_lookup(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
/* the space map ZAP doesn't exist yet */
return (0);
} else if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
"zap_lookup(DMU_POOL_DIRECTORY_OBJECT) [error %d]",
error);
return (error);
}
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
for (zap_cursor_init(&zc, spa_meta_objset(spa), spacemap_zap);
(error = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t log_txg = zfs_strtonum(za->za_name, NULL);
spa_log_sm_t *sls =
spa_log_sm_alloc(za->za_first_integer, log_txg);
avl_add(&spa->spa_sm_logs_by_txg, sls);
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
if (error != ENOENT) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
"zap_cursor_retrieve(spacemap_zap) [error %d]",
error);
return (error);
}
for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
m; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
spa_log_sm_t target = { .sls_txg = metaslab_unflushed_txg(m) };
spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
&target, NULL);
/*
* At this point if sls is NULL it means that a bug occurred
* in ZFS the last time the pool was open or earlier in the
* import code path. In general, we would have placed a
* VERIFY() here or in this case just let the kernel panic
* with NULL pointer dereference when incrementing sls_mscount,
* but since this is the import code path we can be a bit more
* lenient. Thus, for DEBUG bits we always cause a panic, while
* in production we log the error and just fail the import.
*/
ASSERT(sls != NULL);
if (sls == NULL) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): bug "
"encountered: could not find log spacemap for "
"TXG %llu [error %d]",
(u_longlong_t)metaslab_unflushed_txg(m), ENOENT);
return (ENOENT);
}
sls->sls_mscount++;
}
return (0);
}
typedef struct spa_ld_log_sm_arg {
spa_t *slls_spa;
uint64_t slls_txg;
} spa_ld_log_sm_arg_t;
static int
spa_ld_log_sm_cb(space_map_entry_t *sme, void *arg)
{
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint32_t vdev_id = sme->sme_vdev;
spa_ld_log_sm_arg_t *slls = arg;
spa_t *spa = slls->slls_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/*
* If the vdev has been removed (i.e. it is indirect or a hole)
* skip this entry. The contents of this vdev have already moved
* elsewhere.
*/
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(!ms->ms_loaded);
/*
* If we have already flushed entries for this TXG to this
* metaslab's space map, then ignore it. Note that we flush
* before processing any allocations/frees for that TXG, so
* the metaslab's space map only has entries from *before*
* the unflushed TXG.
*/
if (slls->slls_txg < metaslab_unflushed_txg(ms))
return (0);
switch (sme->sme_type) {
case SM_ALLOC:
- range_tree_remove_xor_add_segment(offset, offset + size,
+ zfs_range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_frees, ms->ms_unflushed_allocs);
break;
case SM_FREE:
- range_tree_remove_xor_add_segment(offset, offset + size,
+ zfs_range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_allocs, ms->ms_unflushed_frees);
break;
default:
panic("invalid maptype_t");
break;
}
if (!metaslab_unflushed_dirty(ms)) {
metaslab_set_unflushed_dirty(ms, B_TRUE);
spa_log_summary_dirty_flushed_metaslab(spa,
metaslab_unflushed_txg(ms));
}
return (0);
}
static int
spa_ld_log_sm_data(spa_t *spa)
{
spa_log_sm_t *sls, *psls;
int error = 0;
/*
* If we are not going to do any writes there is no need
* to read the log space maps.
*/
if (!spa_writeable(spa))
return (0);
ASSERT0(spa->spa_unflushed_stats.sus_nblocks);
ASSERT0(spa->spa_unflushed_stats.sus_memused);
hrtime_t read_logs_starttime = gethrtime();
/* Prefetch log spacemap dnodes. */
for (sls = avl_first(&spa->spa_sm_logs_by_txg); sls;
sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
dmu_prefetch_dnode(spa_meta_objset(spa), sls->sls_sm_obj,
ZIO_PRIORITY_SYNC_READ);
}
uint_t pn = 0;
uint64_t ps = 0;
uint64_t nsm = 0;
psls = sls = avl_first(&spa->spa_sm_logs_by_txg);
while (sls != NULL) {
/* Prefetch log spacemaps up to 16 TXGs or MBs ahead. */
if (psls != NULL && pn < 16 &&
(pn < 2 || ps < 2 * dmu_prefetch_max)) {
error = space_map_open(&psls->sls_sm,
spa_meta_objset(spa), psls->sls_sm_obj, 0,
UINT64_MAX, SPA_MINBLOCKSHIFT);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data(): "
"failed at space_map_open(obj=%llu) "
"[error %d]",
(u_longlong_t)sls->sls_sm_obj, error);
goto out;
}
dmu_prefetch(spa_meta_objset(spa), psls->sls_sm_obj,
0, 0, space_map_length(psls->sls_sm),
ZIO_PRIORITY_ASYNC_READ);
pn++;
ps += space_map_length(psls->sls_sm);
psls = AVL_NEXT(&spa->spa_sm_logs_by_txg, psls);
continue;
}
/* Load TXG log spacemap into ms_unflushed_allocs/frees. */
kpreempt(KPREEMPT_SYNC);
ASSERT0(sls->sls_nblocks);
sls->sls_nblocks = space_map_nblocks(sls->sls_sm);
spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
summary_add_data(spa, sls->sls_txg,
sls->sls_mscount, 0, sls->sls_nblocks);
spa_import_progress_set_notes_nolog(spa,
"Read %llu of %lu log space maps", (u_longlong_t)nsm,
avl_numnodes(&spa->spa_sm_logs_by_txg));
struct spa_ld_log_sm_arg vla = {
.slls_spa = spa,
.slls_txg = sls->sls_txg
};
error = space_map_iterate(sls->sls_sm,
space_map_length(sls->sls_sm), spa_ld_log_sm_cb, &vla);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data(): failed "
"at space_map_iterate(obj=%llu) [error %d]",
(u_longlong_t)sls->sls_sm_obj, error);
goto out;
}
pn--;
ps -= space_map_length(sls->sls_sm);
nsm++;
space_map_close(sls->sls_sm);
sls->sls_sm = NULL;
sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls);
/* Update the log block limit to account for the just-loaded log. */
spa_log_sm_set_blocklimit(spa);
}
hrtime_t read_logs_endtime = gethrtime();
spa_load_note(spa,
"Read %lu log space maps (%llu total blocks - blksz = %llu bytes) "
"in %lld ms", avl_numnodes(&spa->spa_sm_logs_by_txg),
(u_longlong_t)spa_log_sm_nblocks(spa),
(u_longlong_t)zfs_log_sm_blksz,
(longlong_t)NSEC2MSEC(read_logs_endtime - read_logs_starttime));
out:
if (error != 0) {
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
if (sls->sls_sm) {
space_map_close(sls->sls_sm);
sls->sls_sm = NULL;
}
}
} else {
ASSERT0(pn);
ASSERT0(ps);
}
/*
* Now that the metaslabs contain their unflushed changes:
* [1] recalculate their actual allocated space
* [2] recalculate their weights
* [3] sum up the memory usage of their unflushed range trees
* [4] optionally load them, if debug_load is set
*
* Note that even in the case where we get here because of an
* error (e.g. error != 0), we still want to update the fields
* below in order to have a proper teardown in spa_unload().
*/
for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
mutex_enter(&m->ms_lock);
m->ms_allocated_space = space_map_allocated(m->ms_sm) +
- range_tree_space(m->ms_unflushed_allocs) -
- range_tree_space(m->ms_unflushed_frees);
+ zfs_range_tree_space(m->ms_unflushed_allocs) -
+ zfs_range_tree_space(m->ms_unflushed_frees);
vdev_t *vd = m->ms_group->mg_vd;
metaslab_space_update(vd, m->ms_group->mg_class,
- range_tree_space(m->ms_unflushed_allocs), 0, 0);
+ zfs_range_tree_space(m->ms_unflushed_allocs), 0, 0);
metaslab_space_update(vd, m->ms_group->mg_class,
- -range_tree_space(m->ms_unflushed_frees), 0, 0);
+ -zfs_range_tree_space(m->ms_unflushed_frees), 0, 0);
ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK);
metaslab_recalculate_weight_and_sort(m);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(m);
if (metaslab_debug_load && m->ms_sm != NULL) {
VERIFY0(metaslab_load(m));
metaslab_set_selected_txg(m, 0);
}
mutex_exit(&m->ms_lock);
}
return (error);
}
static int
spa_ld_unflushed_txgs(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
if (vd->vdev_top_zap == 0)
return (0);
uint64_t object = 0;
int error = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &object);
if (error == ENOENT)
return (0);
else if (error != 0) {
spa_load_failed(spa, "spa_ld_unflushed_txgs(): failed at "
"zap_lookup(vdev_top_zap=%llu) [error %d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
ASSERT(ms != NULL);
metaslab_unflushed_phys_t entry;
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
error = dmu_read(mos, object,
entry_offset, entry_size, &entry, 0);
if (error != 0) {
spa_load_failed(spa, "spa_ld_unflushed_txgs(): "
"failed at dmu_read(obj=%llu) [error %d]",
(u_longlong_t)object, error);
return (error);
}
ms->ms_unflushed_txg = entry.msp_unflushed_txg;
ms->ms_unflushed_dirty = B_FALSE;
- ASSERT(range_tree_is_empty(ms->ms_unflushed_allocs));
- ASSERT(range_tree_is_empty(ms->ms_unflushed_frees));
+ ASSERT(zfs_range_tree_is_empty(ms->ms_unflushed_allocs));
+ ASSERT(zfs_range_tree_is_empty(ms->ms_unflushed_frees));
if (ms->ms_unflushed_txg != 0) {
mutex_enter(&spa->spa_flushed_ms_lock);
avl_add(&spa->spa_metaslabs_by_flushed, ms);
mutex_exit(&spa->spa_flushed_ms_lock);
}
}
return (0);
}
/*
* Read all the log space map entries into their respective
* metaslab unflushed trees and keep them sorted by TXG in the
* SPA's metadata. In addition, setup all the metadata for the
* memory and the block heuristics.
*/
int
spa_ld_log_spacemaps(spa_t *spa)
{
int error;
spa_log_sm_set_blocklimit(spa);
for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[c];
error = spa_ld_unflushed_txgs(vd);
if (error != 0)
return (error);
}
error = spa_ld_log_sm_metadata(spa);
if (error != 0)
return (error);
/*
* Note: we don't actually expect anything to change at this point
* but we grab the config lock so we don't fail any assertions
* when using vdev_lookup_top().
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
error = spa_ld_log_sm_data(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
return (error);
}
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_amt, U64, ZMOD_RW,
"Specific hard-limit in memory that ZFS allows to be used for "
"unflushed changes");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_ppm, U64, ZMOD_RW,
"Percentage of the overall system memory that ZFS allows to be "
"used for unflushed changes (value is calculated over 1000000 for "
"finer granularity)");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_max, U64, ZMOD_RW,
"Hard limit (upper-bound) in the size of the space map log "
"in terms of blocks.");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_min, U64, ZMOD_RW,
"Lower-bound limit for the maximum amount of blocks allowed in "
"log spacemap (see zfs_unflushed_log_block_max)");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_txg_max, U64, ZMOD_RW,
"Hard limit (upper-bound) in the size of the space map log "
"in terms of dirty TXGs.");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_pct, UINT, ZMOD_RW,
"Tunable used to determine the number of blocks that can be used for "
"the spacemap log, expressed as a percentage of the total number of "
"metaslabs in the pool (e.g. 400 means the number of log blocks is "
"capped at 4 times the number of metaslabs)");
ZFS_MODULE_PARAM(zfs, zfs_, max_log_walking, U64, ZMOD_RW,
"The number of past TXGs that the flushing algorithm of the log "
"spacemap feature uses to estimate incoming log blocks");
ZFS_MODULE_PARAM(zfs, zfs_, keep_log_spacemaps_at_export, INT, ZMOD_RW,
"Prevent the log spacemaps from being flushed and destroyed "
"during pool export/destroy");
ZFS_MODULE_PARAM(zfs, zfs_, max_logsm_summary_length, U64, ZMOD_RW,
"Maximum number of rows allowed in the summary of the spacemap log");
ZFS_MODULE_PARAM(zfs, zfs_, min_metaslabs_to_flush, U64, ZMOD_RW,
"Minimum number of metaslabs to flush per dirty TXG");
diff --git a/sys/contrib/openzfs/module/zfs/space_map.c b/sys/contrib/openzfs/module/zfs/space_map.c
index a336ff41eadb..36e15b8d73af 100644
--- a/sys/contrib/openzfs/module/zfs/space_map.c
+++ b/sys/contrib/openzfs/module/zfs/space_map.c
@@ -1,1108 +1,1111 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/zfeature.h>
/*
* Note on space map block size:
*
* The data for a given space map can be kept on blocks of any size.
* Larger blocks entail fewer I/O operations, but they also cause the
* DMU to keep more data in-core, and also to waste more I/O bandwidth
* when only a few blocks have changed since the last transaction group.
*/
/*
* Enabled whenever we want to stress test the use of double-word
* space map entries.
*/
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;
/*
* Override the default indirect block size of 128K, instead use 16K for
* spacemaps (2^14 bytes). This dramatically reduces write inflation since
* appending to a spacemap typically has to write one data block (4KB) and one
* or two indirect blocks (16K-32K, rather than 128K).
*/
int space_map_ibs = 14;
boolean_t
sm_entry_is_debug(uint64_t e)
{
return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}
boolean_t
sm_entry_is_single_word(uint64_t e)
{
uint8_t prefix = SM_PREFIX_DECODE(e);
return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}
boolean_t
sm_entry_is_double_word(uint64_t e)
{
return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
/*
* Iterate through the space map, invoking the callback on each (non-debug)
* space map entry. Stop after reading 'end' bytes of the space map.
*/
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
uint64_t blksz = sm->sm_blksz;
ASSERT3U(blksz, !=, 0);
ASSERT3U(end, <=, space_map_length(sm));
ASSERT0(P2PHASE(end, sizeof (uint64_t)));
dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
ZIO_PRIORITY_SYNC_READ);
int error = 0;
uint64_t txg = 0, sync_pass = 0;
for (uint64_t block_base = 0; block_base < end && error == 0;
block_base += blksz) {
dmu_buf_t *db;
error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
block_base, FTAG, &db, DMU_READ_PREFETCH);
if (error != 0)
return (error);
uint64_t *block_start = db->db_data;
uint64_t block_length = MIN(end - block_base, blksz);
uint64_t *block_end = block_start +
(block_length / sizeof (uint64_t));
VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
VERIFY3U(block_length, !=, 0);
ASSERT3U(blksz, ==, db->db_size);
for (uint64_t *block_cursor = block_start;
block_cursor < block_end && error == 0; block_cursor++) {
uint64_t e = *block_cursor;
if (sm_entry_is_debug(e)) {
/*
* Debug entries are only needed to record the
* current TXG and sync pass if available.
*
* Note though that sometimes there can be
* debug entries that are used as padding
* at the end of space map blocks in order
* to not split a double-word entry in the
* middle between two blocks. These entries
* have their TXG field set to 0 and we
* skip them without recording the TXG.
* [see comment in space_map_write_seg()]
*/
uint64_t e_txg = SM_DEBUG_TXG_DECODE(e);
if (e_txg != 0) {
txg = e_txg;
sync_pass = SM_DEBUG_SYNCPASS_DECODE(e);
} else {
ASSERT0(SM_DEBUG_SYNCPASS_DECODE(e));
}
continue;
}
uint64_t raw_offset, raw_run, vdev_id;
maptype_t type;
if (sm_entry_is_single_word(e)) {
type = SM_TYPE_DECODE(e);
vdev_id = SM_NO_VDEVID;
raw_offset = SM_OFFSET_DECODE(e);
raw_run = SM_RUN_DECODE(e);
} else {
/* it is a two-word entry */
ASSERT(sm_entry_is_double_word(e));
raw_run = SM2_RUN_DECODE(e);
vdev_id = SM2_VDEV_DECODE(e);
/* move on to the second word */
block_cursor++;
e = *block_cursor;
VERIFY3P(block_cursor, <=, block_end);
type = SM2_TYPE_DECODE(e);
raw_offset = SM2_OFFSET_DECODE(e);
}
uint64_t entry_offset = (raw_offset << sm->sm_shift) +
sm->sm_start;
uint64_t entry_run = raw_run << sm->sm_shift;
VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
ASSERT3U(entry_offset, >=, sm->sm_start);
ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
ASSERT3U(entry_run, <=, sm->sm_size);
ASSERT3U(entry_offset + entry_run, <=,
sm->sm_start + sm->sm_size);
space_map_entry_t sme = {
.sme_type = type,
.sme_vdev = vdev_id,
.sme_offset = entry_offset,
.sme_run = entry_run,
.sme_txg = txg,
.sme_sync_pass = sync_pass
};
error = callback(&sme, arg);
}
dmu_buf_rele(db, FTAG);
}
return (error);
}
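/*
* Minimal usage sketch for space_map_iterate(). The helper names below are
* hypothetical and only illustrate the callback contract: the callback is
* invoked once per non-debug entry, and iteration stops early if it
* returns non-zero.
*/
#if 0	/* illustration only */
static int
count_free_cb(space_map_entry_t *sme, void *arg)
{
        uint64_t *nfree = arg;

        if (sme->sme_type == SM_FREE)
                (*nfree)++;
        return (0);
}

static uint64_t
space_map_count_free(space_map_t *sm)
{
        uint64_t nfree = 0;

        /* walk the entire on-disk length of the space map */
        VERIFY0(space_map_iterate(sm, space_map_length(sm),
            count_free_cb, &nfree));
        return (nfree);
}
#endif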
/*
* Reads the entries from the last block of the space map into
* buf in reverse order. Populates nwords with the number of words
* in the last block.
*
* Refer to block comment within space_map_incremental_destroy()
* to understand why this function is needed.
*/
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
uint64_t bufsz, uint64_t *nwords)
{
int error = 0;
dmu_buf_t *db;
/*
* Find the offset of the last word in the space map and use
* that to read the last block of the space map with
* dmu_buf_hold().
*/
uint64_t last_word_offset =
sm->sm_phys->smp_length - sizeof (uint64_t);
error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
FTAG, &db, DMU_READ_NO_PREFETCH);
if (error != 0)
return (error);
ASSERT3U(sm->sm_object, ==, db->db_object);
ASSERT3U(sm->sm_blksz, ==, db->db_size);
ASSERT3U(bufsz, >=, db->db_size);
ASSERT(nwords != NULL);
uint64_t *words = db->db_data;
*nwords =
(sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);
ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));
uint64_t n = *nwords;
uint64_t j = n - 1;
for (uint64_t i = 0; i < n; i++) {
uint64_t entry = words[i];
if (sm_entry_is_double_word(entry)) {
/*
* Since we are populating the buffer backwards
* we have to be extra careful and add the two
* words of the double-word entry in the right
* order.
*/
ASSERT3U(j, >, 0);
buf[j - 1] = entry;
i++;
ASSERT3U(i, <, n);
entry = words[i];
buf[j] = entry;
j -= 2;
} else {
ASSERT(sm_entry_is_debug(entry) ||
sm_entry_is_single_word(entry));
buf[j] = entry;
j--;
}
}
/*
* Assert that we wrote backwards all the
* way to the beginning of the buffer.
*/
ASSERT3S(j, ==, -1);
dmu_buf_rele(db, FTAG);
return (error);
}
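/*
* Example of the reversal above: if the last block holds the words
* [A, B1, B2, C] on disk, where A and C are one-word (or debug) entries
* and B1/B2 form a two-word entry, the buffer produced is [C, B1, B2, A].
* The entries are reversed, but the two words of B keep their original
* order so the entry can still be decoded first-word-first.
*/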
/*
* Note: This function performs destructive actions - specifically
* it deletes entries from the end of the space map. Thus, callers
* should ensure that they are holding the appropriate locks for
* the space map that they provide.
*/
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
dmu_tx_t *tx)
{
uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
uint64_t *buf = zio_buf_alloc(bufsz);
dmu_buf_will_dirty(sm->sm_dbuf, tx);
/*
* Ideally we would want to iterate from the beginning of the
* space map to the end in incremental steps. The issue with this
* approach is that we don't have any field on-disk that points
* us where to start between each step. We could try zeroing out
* entries that we've destroyed, but this doesn't work either as
* an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
*
* As a result, we destroy the space map's entries incrementally,
* starting from the end, applying the callback to each entry before
* it is removed.
*
* The problem with this approach is that we cannot literally
* iterate through the words in the space map backwards as we
* can't distinguish two-word space map entries from their second
* word. Thus we do the following:
*
* 1] We get all the entries from the last block of the space map
* and put them into a buffer in reverse order. This way the
* last entry comes first in the buffer, the second to last is
* second, etc.
* 2] We iterate through the entries in the buffer and we apply
* the callback to each one. As we move from entry to entry we
* decrease the size of the space map, effectively deleting
* each entry.
* 3] If there are no more entries in the space map or the callback
* returns a value other than 0, we stop iterating over the
* space map. If there are entries remaining and the callback
* returned 0, we go back to step [1].
*/
int error = 0;
while (space_map_length(sm) > 0 && error == 0) {
uint64_t nwords = 0;
error = space_map_reversed_last_block_entries(sm, buf, bufsz,
&nwords);
if (error != 0)
break;
ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));
for (uint64_t i = 0; i < nwords; i++) {
uint64_t e = buf[i];
if (sm_entry_is_debug(e)) {
sm->sm_phys->smp_length -= sizeof (uint64_t);
continue;
}
int words = 1;
uint64_t raw_offset, raw_run, vdev_id;
maptype_t type;
if (sm_entry_is_single_word(e)) {
type = SM_TYPE_DECODE(e);
vdev_id = SM_NO_VDEVID;
raw_offset = SM_OFFSET_DECODE(e);
raw_run = SM_RUN_DECODE(e);
} else {
ASSERT(sm_entry_is_double_word(e));
words = 2;
raw_run = SM2_RUN_DECODE(e);
vdev_id = SM2_VDEV_DECODE(e);
/* move to the second word */
i++;
e = buf[i];
ASSERT3P(i, <=, nwords);
type = SM2_TYPE_DECODE(e);
raw_offset = SM2_OFFSET_DECODE(e);
}
uint64_t entry_offset =
(raw_offset << sm->sm_shift) + sm->sm_start;
uint64_t entry_run = raw_run << sm->sm_shift;
VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
VERIFY3U(entry_offset, >=, sm->sm_start);
VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
VERIFY3U(entry_run, <=, sm->sm_size);
VERIFY3U(entry_offset + entry_run, <=,
sm->sm_start + sm->sm_size);
space_map_entry_t sme = {
.sme_type = type,
.sme_vdev = vdev_id,
.sme_offset = entry_offset,
.sme_run = entry_run
};
error = callback(&sme, arg);
if (error != 0)
break;
if (type == SM_ALLOC)
sm->sm_phys->smp_alloc -= entry_run;
else
sm->sm_phys->smp_alloc += entry_run;
sm->sm_phys->smp_length -= words * sizeof (uint64_t);
}
}
if (space_map_length(sm) == 0) {
ASSERT0(error);
ASSERT0(space_map_allocated(sm));
}
zio_buf_free(buf, bufsz);
return (error);
}
typedef struct space_map_load_arg {
space_map_t *smla_sm;
- range_tree_t *smla_rt;
+ zfs_range_tree_t *smla_rt;
maptype_t smla_type;
} space_map_load_arg_t;
static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
space_map_load_arg_t *smla = arg;
if (sme->sme_type == smla->smla_type) {
- VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
+ VERIFY3U(zfs_range_tree_space(smla->smla_rt) + sme->sme_run, <=,
smla->smla_sm->sm_size);
- range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
+ zfs_range_tree_add(smla->smla_rt, sme->sme_offset,
+ sme->sme_run);
} else {
- range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
+ zfs_range_tree_remove(smla->smla_rt, sme->sme_offset,
+ sme->sme_run);
}
return (0);
}
/*
* Load the space map into the range tree, like space_map_load, but only
* read the first 'length' bytes of the space map.
*/
int
-space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+space_map_load_length(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t length)
{
space_map_load_arg_t smla;
- VERIFY0(range_tree_space(rt));
+ VERIFY0(zfs_range_tree_space(rt));
if (maptype == SM_FREE)
- range_tree_add(rt, sm->sm_start, sm->sm_size);
+ zfs_range_tree_add(rt, sm->sm_start, sm->sm_size);
smla.smla_rt = rt;
smla.smla_sm = sm;
smla.smla_type = maptype;
int err = space_map_iterate(sm, length,
space_map_load_callback, &smla);
if (err != 0)
- range_tree_vacate(rt, NULL, NULL);
+ zfs_range_tree_vacate(rt, NULL, NULL);
return (err);
}
/*
* Load the space map from disk into the specified range tree. Segments of maptype
* are added to the range tree, other segment types are removed.
*/
int
-space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
+space_map_load(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype)
{
return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}
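/*
* Usage sketch (the helper name is hypothetical): load every allocated
* segment recorded in a space map into a freshly created range tree. The
* destination tree must be empty, which is why a new one is created here;
* tree cleanup is omitted for brevity.
*/
#if 0	/* illustration only */
static void
space_map_log_alloc(space_map_t *sm)
{
        zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
            NULL, 0, 0);

        if (space_map_load(sm, rt, SM_ALLOC) == 0) {
                /* rt now holds all segments the space map marks allocated */
                zfs_dbgmsg("allocated space: %llu",
                    (u_longlong_t)zfs_range_tree_space(rt));
        }
}
#endif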
void
space_map_histogram_clear(space_map_t *sm)
{
if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
return;
memset(sm->sm_phys->smp_histogram, 0,
sizeof (sm->sm_phys->smp_histogram));
}
boolean_t
-space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
+space_map_histogram_verify(space_map_t *sm, zfs_range_tree_t *rt)
{
/*
* Verify that the in-core range tree does not have any
* ranges smaller than our sm_shift size.
*/
for (int i = 0; i < sm->sm_shift; i++) {
if (rt->rt_histogram[i] != 0)
return (B_FALSE);
}
return (B_TRUE);
}
void
-space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
+space_map_histogram_add(space_map_t *sm, zfs_range_tree_t *rt, dmu_tx_t *tx)
{
int idx = 0;
ASSERT(dmu_tx_is_syncing(tx));
VERIFY3U(space_map_object(sm), !=, 0);
if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
return;
dmu_buf_will_dirty(sm->sm_dbuf, tx);
ASSERT(space_map_histogram_verify(sm, rt));
/*
* Transfer the content of the range tree histogram to the space
* map histogram. The space map histogram contains 32 buckets ranging
* from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
* however, can represent ranges from 2^0 to 2^63. Since the space
* map only cares about allocatable blocks (minimum of 2^sm_shift) we
* can safely ignore all ranges in the range tree smaller than 2^sm_shift.
*/
- for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ for (int i = sm->sm_shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
/*
* Since the largest histogram bucket in the space map is
* 2^(32+sm_shift-1), we need to normalize the values in
* the range tree for any bucket larger than that size. For
* example given an sm_shift of 9, ranges larger than 2^40
* would get normalized as if they were 1TB ranges. Assume
* the range tree had a count of 5 in the 2^44 (16TB) bucket,
* the calculation below would normalize this to 5 * 2^4 (16).
*/
ASSERT3U(i, >=, idx + sm->sm_shift);
sm->sm_phys->smp_histogram[idx] +=
rt->rt_histogram[i] << (i - idx - sm->sm_shift);
/*
* Increment the space map's index as long as we haven't
* reached the maximum bucket size. Accumulate all ranges
* larger than the max bucket size into the last bucket.
*/
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + sm->sm_shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
dmu_buf_will_dirty(sm->sm_dbuf, tx);
uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
SM_DEBUG_ACTION_ENCODE(maptype) |
SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
sizeof (dentry), &dentry, tx);
sm->sm_phys->smp_length += sizeof (dentry);
}
/*
* Writes one or more entries given a segment.
*
* Note: The function may release the dbuf from the pointer initially
* passed to it, and return a different dbuf. Also, the space map's
* dbuf must be dirty for the changes in sm_phys to take effect.
*/
static void
space_map_write_seg(space_map_t *sm, uint64_t rstart, uint64_t rend,
maptype_t maptype, uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp,
const void *tag, dmu_tx_t *tx)
{
ASSERT3U(words, !=, 0);
ASSERT3U(words, <=, 2);
/* ensure the vdev_id can be represented by the space map */
ASSERT3U(vdev_id, <=, SM_NO_VDEVID);
/*
* if this is a single word entry, ensure that no vdev was
* specified.
*/
IMPLY(words == 1, vdev_id == SM_NO_VDEVID);
dmu_buf_t *db = *dbp;
ASSERT3U(db->db_size, ==, sm->sm_blksz);
uint64_t *block_base = db->db_data;
uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
uint64_t *block_cursor = block_base +
(sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);
ASSERT3P(block_cursor, <=, block_end);
uint64_t size = (rend - rstart) >> sm->sm_shift;
uint64_t start = (rstart - sm->sm_start) >> sm->sm_shift;
uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;
ASSERT3U(rstart, >=, sm->sm_start);
ASSERT3U(rstart, <, sm->sm_start + sm->sm_size);
ASSERT3U(rend - rstart, <=, sm->sm_size);
ASSERT3U(rend, <=, sm->sm_start + sm->sm_size);
while (size != 0) {
ASSERT3P(block_cursor, <=, block_end);
/*
* If we are at the end of this block, flush it and start
* writing again from the beginning.
*/
if (block_cursor == block_end) {
dmu_buf_rele(db, tag);
uint64_t next_word_offset = sm->sm_phys->smp_length;
VERIFY0(dmu_buf_hold(sm->sm_os,
space_map_object(sm), next_word_offset,
tag, &db, DMU_READ_PREFETCH));
dmu_buf_will_dirty(db, tx);
/* update caller's dbuf */
*dbp = db;
ASSERT3U(db->db_size, ==, sm->sm_blksz);
block_base = db->db_data;
block_cursor = block_base;
block_end = block_base +
(db->db_size / sizeof (uint64_t));
}
/*
* If we are writing a two-word entry and we only have one
* word left on this block, just pad it with an empty debug
* entry and write the two-word entry in the next block.
*/
uint64_t *next_entry = block_cursor + 1;
if (next_entry == block_end && words > 1) {
ASSERT3U(words, ==, 2);
*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
SM_DEBUG_ACTION_ENCODE(0) |
SM_DEBUG_SYNCPASS_ENCODE(0) |
SM_DEBUG_TXG_ENCODE(0);
block_cursor++;
sm->sm_phys->smp_length += sizeof (uint64_t);
ASSERT3P(block_cursor, ==, block_end);
continue;
}
uint64_t run_len = MIN(size, run_max);
switch (words) {
case 1:
*block_cursor = SM_OFFSET_ENCODE(start) |
SM_TYPE_ENCODE(maptype) |
SM_RUN_ENCODE(run_len);
block_cursor++;
break;
case 2:
/* write the first word of the entry */
*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
SM2_RUN_ENCODE(run_len) |
SM2_VDEV_ENCODE(vdev_id);
block_cursor++;
/* move on to the second word of the entry */
ASSERT3P(block_cursor, <, block_end);
*block_cursor = SM2_TYPE_ENCODE(maptype) |
SM2_OFFSET_ENCODE(start);
block_cursor++;
break;
default:
panic("%d-word space map entries are not supported",
words);
break;
}
sm->sm_phys->smp_length += words * sizeof (uint64_t);
start += run_len;
size -= run_len;
}
ASSERT0(size);
}
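/*
* Example of the splitting above: with one-word entries, a segment whose
* run (in sm_shift units) is 2.5 * SM_RUN_MAX is emitted as three
* consecutive entries with runs SM_RUN_MAX, SM_RUN_MAX and 0.5 * SM_RUN_MAX,
* all carrying the same maptype and covering consecutive offsets.
*/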
/*
* Note: The space map's dbuf must be dirty for the changes in sm_phys to
* take effect.
*/
static void
-space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+space_map_write_impl(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t vdev_id, dmu_tx_t *tx)
{
spa_t *spa = tx->tx_pool->dp_spa;
dmu_buf_t *db;
space_map_write_intro_debug(sm, maptype, tx);
#ifdef ZFS_DEBUG
/*
* We do this right after we write the intro debug entry
* because the estimate does not take it into account.
*/
uint64_t initial_objsize = sm->sm_phys->smp_length;
uint64_t estimated_growth =
space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif
/*
* Find the offset right after the last word in the space map
* and use that to get a hold of the last block, so we can
* start appending to it.
*/
uint64_t next_word_offset = sm->sm_phys->smp_length;
VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
ASSERT3U(db->db_size, ==, sm->sm_blksz);
dmu_buf_will_dirty(db, tx);
zfs_btree_t *t = &rt->rt_root;
zfs_btree_index_t where;
- for (range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
+ for (zfs_range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
rs = zfs_btree_next(t, &where, &where)) {
- uint64_t offset = (rs_get_start(rs, rt) - sm->sm_start) >>
- sm->sm_shift;
- uint64_t length = (rs_get_end(rs, rt) - rs_get_start(rs, rt)) >>
+ uint64_t offset = (zfs_rs_get_start(rs, rt) - sm->sm_start) >>
sm->sm_shift;
+ uint64_t length = (zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt)) >> sm->sm_shift;
uint8_t words = 1;
/*
* We only write two-word entries when both of the following
* are true:
*
* [1] The feature is enabled.
* [2] The offset or run is too big for a single-word entry,
* or the vdev_id is set (meaning not equal to
* SM_NO_VDEVID).
*
* Note that for purposes of testing we've added the case that
* we write two-word entries occasionally when the feature is
* enabled and zfs_force_some_double_word_sm_entries has been
* set.
*/
if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
(offset >= (1ULL << SM_OFFSET_BITS) ||
length > SM_RUN_MAX ||
vdev_id != SM_NO_VDEVID ||
(zfs_force_some_double_word_sm_entries &&
random_in_range(100) == 0)))
words = 2;
- space_map_write_seg(sm, rs_get_start(rs, rt), rs_get_end(rs,
- rt), maptype, vdev_id, words, &db, FTAG, tx);
+ space_map_write_seg(sm, zfs_rs_get_start(rs, rt),
+ zfs_rs_get_end(rs, rt), maptype, vdev_id, words, &db,
+ FTAG, tx);
}
dmu_buf_rele(db, FTAG);
#ifdef ZFS_DEBUG
/*
* We expect our estimation to be based on the worst case
* scenario [see comment in space_map_estimate_optimal_size()].
* Therefore we expect the actual objsize to be equal or less
* than whatever we estimated it to be.
*/
ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}
/*
* Note: This function manipulates the state of the given space map but
* does not hold any locks implicitly. Thus the caller is responsible
* for synchronizing writes to the space map.
*/
void
-space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+space_map_write(space_map_t *sm, zfs_range_tree_t *rt, maptype_t maptype,
uint64_t vdev_id, dmu_tx_t *tx)
{
ASSERT(dsl_pool_sync_context(dmu_objset_pool(sm->sm_os)));
VERIFY3U(space_map_object(sm), !=, 0);
dmu_buf_will_dirty(sm->sm_dbuf, tx);
/*
* This field is no longer necessary since the in-core space map
* now contains the object number but is maintained for backwards
* compatibility.
*/
sm->sm_phys->smp_object = sm->sm_object;
- if (range_tree_is_empty(rt)) {
+ if (zfs_range_tree_is_empty(rt)) {
VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
return;
}
if (maptype == SM_ALLOC)
- sm->sm_phys->smp_alloc += range_tree_space(rt);
+ sm->sm_phys->smp_alloc += zfs_range_tree_space(rt);
else
- sm->sm_phys->smp_alloc -= range_tree_space(rt);
+ sm->sm_phys->smp_alloc -= zfs_range_tree_space(rt);
uint64_t nodes = zfs_btree_numnodes(&rt->rt_root);
- uint64_t rt_space = range_tree_space(rt);
+ uint64_t rt_space = zfs_range_tree_space(rt);
space_map_write_impl(sm, rt, maptype, vdev_id, tx);
/*
* Ensure that the space_map's accounting wasn't changed
* while we were in the middle of writing it out.
*/
VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root));
- VERIFY3U(range_tree_space(rt), ==, rt_space);
+ VERIFY3U(zfs_range_tree_space(rt), ==, rt_space);
}
static int
space_map_open_impl(space_map_t *sm)
{
int error;
u_longlong_t blocks;
error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
if (error)
return (error);
dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
sm->sm_phys = sm->sm_dbuf->db_data;
return (0);
}
int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
uint64_t start, uint64_t size, uint8_t shift)
{
space_map_t *sm;
int error;
ASSERT(*smp == NULL);
ASSERT(os != NULL);
ASSERT(object != 0);
sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);
sm->sm_start = start;
sm->sm_size = size;
sm->sm_shift = shift;
sm->sm_os = os;
sm->sm_object = object;
sm->sm_blksz = 0;
sm->sm_dbuf = NULL;
sm->sm_phys = NULL;
error = space_map_open_impl(sm);
if (error != 0) {
space_map_close(sm);
return (error);
}
*smp = sm;
return (0);
}
void
space_map_close(space_map_t *sm)
{
if (sm == NULL)
return;
if (sm->sm_dbuf != NULL)
dmu_buf_rele(sm->sm_dbuf, sm);
sm->sm_dbuf = NULL;
sm->sm_phys = NULL;
kmem_free(sm, sizeof (*sm));
}
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
objset_t *os = sm->sm_os;
spa_t *spa = dmu_objset_spa(os);
dmu_object_info_t doi;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(dmu_tx_is_syncing(tx));
VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));
dmu_object_info_from_db(sm->sm_dbuf, &doi);
/*
* If the space map has the wrong bonus size (because
* SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
* the wrong block size (because space_map_blksz has changed),
* free and re-allocate its object with the updated sizes.
*
* Otherwise, just truncate the current object.
*/
if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
doi.doi_data_block_size != blocksize ||
doi.doi_metadata_block_size != 1 << space_map_ibs) {
zfs_dbgmsg("txg %llu, spa %s, sm %px, reallocating "
"object[%llu]: old bonus %llu, old blocksz %u",
(u_longlong_t)dmu_tx_get_txg(tx), spa_name(spa), sm,
(u_longlong_t)sm->sm_object,
(u_longlong_t)doi.doi_bonus_size,
doi.doi_data_block_size);
space_map_free(sm, tx);
dmu_buf_rele(sm->sm_dbuf, sm);
sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
VERIFY0(space_map_open_impl(sm));
} else {
VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
/*
* If the spacemap is reallocated, its histogram
* will be reset. Do the same in the common case so that
* bugs related to the uncommon case do not go unnoticed.
*/
memset(sm->sm_phys->smp_histogram, 0,
sizeof (sm->sm_phys->smp_histogram));
}
dmu_buf_will_dirty(sm->sm_dbuf, tx);
sm->sm_phys->smp_length = 0;
sm->sm_phys->smp_alloc = 0;
}
uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(os);
uint64_t object;
int bonuslen;
if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
bonuslen = sizeof (space_map_phys_t);
ASSERT3U(bonuslen, <=, dmu_bonus_max());
} else {
bonuslen = SPACE_MAP_SIZE_V0;
}
object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
return (object);
}
void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(os);
if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, smobj, &doi));
if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
spa_feature_decr(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
}
}
VERIFY0(dmu_object_free(os, smobj, tx));
}
void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
if (sm == NULL)
return;
space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
sm->sm_object = 0;
}
/*
* Given a range tree, make a worst-case estimate of how much space
* the tree's segments would take if they were written to the given
* space map.
*/
uint64_t
-space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
+space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
uint64_t vdev_id)
{
spa_t *spa = dmu_objset_spa(sm->sm_os);
uint64_t shift = sm->sm_shift;
uint64_t *histogram = rt->rt_histogram;
uint64_t entries_for_seg = 0;
/*
* In order to get a quick estimate of the optimal size that this
* range tree would have on-disk as a space map, we iterate through
* its histogram buckets instead of iterating through its nodes.
*
* Note that this is a highest-bound/worst-case estimate for the
* following reasons:
*
* 1] We assume that we always add a debug padding for each block
* we write and we also assume that we start at the last word
* of a block attempting to write a two-word entry.
* 2] Rounding up errors due to the way segments are distributed
* in the buckets of the range tree's histogram.
* 3] The activation of zfs_force_some_double_word_sm_entries
* (tunable) when testing.
*
* = Math and Rounding Errors =
*
* rt_histogram[i] bucket of a range tree represents the number
* of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
* that, we want to divide the buckets into groups: Buckets that
* can be represented using a single-word entry, ones that can
* be represented with a double-word entry, and ones that can
* only be represented with multiple two-word entries.
*
* [Note that if the new encoding feature is not enabled there
* are only two groups: single-word entry buckets and multiple
* single-word entry buckets. The information below assumes
* two-word entries are enabled, but it can easily be applied when
* the feature is not enabled.]
*
* To find the highest bucket that can be represented with a
* single-word entry we look at the maximum run that such entry
* can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
* the run of a space map entry is shifted by sm_shift, thus we
* add it to the exponent]. This way, excluding the value of the
* maximum run that can be represented by a single-word entry,
* all runs that are smaller exist in buckets 0 to
* SM_RUN_BITS + shift - 1.
*
* To find the highest bucket that can be represented with a
* double-word entry, we follow the same approach. Finally, any
* bucket higher than that are represented with multiple two-word
* entries. To be more specific, if the highest bucket whose
* segments can be represented with a single two-word entry is X,
* then bucket X+1 will need 2 two-word entries for each of its
* segments, X+2 will need 4, X+3 will need 8, ...etc.
*
* With all of the above we make our estimation based on bucket
* groups. There is a rounding error though. As we mentioned in
* the example with the one-word entry, the maximum run that can
* be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
* not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
* that length fall into the next bucket (and bucket group) where
* we start counting two-word entries and this is one more reason
* why the estimated size may end up being bigger than the actual
* size written.
*/
uint64_t size = 0;
uint64_t idx = 0;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
(vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {
/*
* If we are trying to force some double-word entries, just
* assume the worst case of every single-word entry being
* written as a double-word entry.
*/
uint64_t entry_size =
(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
zfs_force_some_double_word_sm_entries) ?
(2 * sizeof (uint64_t)) : sizeof (uint64_t);
uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
for (; idx <= single_entry_max_bucket; idx++)
size += histogram[idx] * entry_size;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
- for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
+ for (; idx < ZFS_RANGE_TREE_HISTOGRAM_SIZE; idx++) {
ASSERT3U(idx, >=, single_entry_max_bucket);
entries_for_seg =
1ULL << (idx - single_entry_max_bucket);
size += histogram[idx] *
entries_for_seg * entry_size;
}
return (size);
}
}
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));
uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
for (; idx <= double_entry_max_bucket; idx++)
size += histogram[idx] * 2 * sizeof (uint64_t);
- for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
+ for (; idx < ZFS_RANGE_TREE_HISTOGRAM_SIZE; idx++) {
ASSERT3U(idx, >=, double_entry_max_bucket);
entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
size += histogram[idx] *
entries_for_seg * 2 * sizeof (uint64_t);
}
/*
* Assume the worst case where we start with the padding at the end
* of the current block and we add an extra padding entry at the end
* of all subsequent blocks.
*/
size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);
return (size);
}
uint64_t
space_map_object(space_map_t *sm)
{
return (sm != NULL ? sm->sm_object : 0);
}
int64_t
space_map_allocated(space_map_t *sm)
{
return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}
uint64_t
space_map_length(space_map_t *sm)
{
return (sm != NULL ? sm->sm_phys->smp_length : 0);
}
uint64_t
space_map_nblocks(space_map_t *sm)
{
if (sm == NULL)
return (0);
return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
}
diff --git a/sys/contrib/openzfs/module/zfs/space_reftree.c b/sys/contrib/openzfs/module/zfs/space_reftree.c
index ee11e162dd5b..baa741395e0c 100644
--- a/sys/contrib/openzfs/module/zfs/space_reftree.c
+++ b/sys/contrib/openzfs/module/zfs/space_reftree.c
@@ -1,152 +1,153 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/range_tree.h>
#include <sys/space_reftree.h>
/*
* Space reference trees.
*
* A range tree is a collection of integers. Every integer is either
* in the tree, or it's not. A space reference tree generalizes
* the idea: it allows its members to have arbitrary reference counts,
* as opposed to the implicit reference count of 0 or 1 in a range tree.
* This representation comes in handy when computing the union or
* intersection of multiple space maps. For example, the union of
* N range trees is the subset of the reference tree with refcnt >= 1.
* The intersection of N range trees is the subset with refcnt >= N.
*
* [It's very much like a Fourier transform. Unions and intersections
* are hard to perform in the 'range tree domain', so we convert the trees
* into the 'reference count domain', where it's trivial, then invert.]
*
* vdev_dtl_reassess() uses computations of this form to determine
* DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
* has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
* has an outage wherever refcnt >= vdev_children.
*/
static int
space_reftree_compare(const void *x1, const void *x2)
{
const space_ref_t *sr1 = (const space_ref_t *)x1;
const space_ref_t *sr2 = (const space_ref_t *)x2;
int cmp = TREE_CMP(sr1->sr_offset, sr2->sr_offset);
if (likely(cmp))
return (cmp);
return (TREE_PCMP(sr1, sr2));
}
void
space_reftree_create(avl_tree_t *t)
{
avl_create(t, space_reftree_compare,
sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}
void
space_reftree_destroy(avl_tree_t *t)
{
space_ref_t *sr;
void *cookie = NULL;
while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(sr, sizeof (*sr));
avl_destroy(t);
}
static void
space_reftree_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
space_ref_t *sr;
sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
sr->sr_offset = offset;
sr->sr_refcnt = refcnt;
avl_add(t, sr);
}
void
space_reftree_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
int64_t refcnt)
{
space_reftree_add_node(t, start, refcnt);
space_reftree_add_node(t, end, -refcnt);
}
/*
* Convert (or add) a range tree into a reference tree.
*/
void
-space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt)
+space_reftree_add_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t refcnt)
{
zfs_btree_index_t where;
- for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs; rs =
- zfs_btree_next(&rt->rt_root, &where, &where)) {
- space_reftree_add_seg(t, rs_get_start(rs, rt), rs_get_end(rs,
- rt), refcnt);
+ for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
+ rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
+ space_reftree_add_seg(t, zfs_rs_get_start(rs, rt),
+ zfs_rs_get_end(rs, rt), refcnt);
}
}
/*
* Convert a reference tree into a range tree. The range tree will contain
* all members of the reference tree for which refcnt >= minref.
*/
void
-space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt, int64_t minref)
+space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t minref)
{
uint64_t start = -1ULL;
int64_t refcnt = 0;
space_ref_t *sr;
- range_tree_vacate(rt, NULL, NULL);
+ zfs_range_tree_vacate(rt, NULL, NULL);
for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
refcnt += sr->sr_refcnt;
if (refcnt >= minref) {
if (start == -1ULL) {
start = sr->sr_offset;
}
} else {
if (start != -1ULL) {
uint64_t end = sr->sr_offset;
ASSERT(start <= end);
if (end > start)
- range_tree_add(rt, start, end - start);
+ zfs_range_tree_add(rt, start, end -
+ start);
start = -1ULL;
}
}
}
ASSERT(refcnt == 0);
ASSERT(start == -1ULL);
}
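/*
* Usage sketch (hypothetical helper): computing the union of two range
* trees with the primitives above. Passing minref = 2 to
* space_reftree_generate_map() instead would produce the intersection,
* as described in the block comment at the top of this file.
*/
#if 0	/* illustration only */
static void
range_tree_union(zfs_range_tree_t *a, zfs_range_tree_t *b,
    zfs_range_tree_t *result)
{
        avl_tree_t reftree;

        space_reftree_create(&reftree);
        space_reftree_add_map(&reftree, a, 1);
        space_reftree_add_map(&reftree, b, 1);
        /* keep everything referenced at least once */
        space_reftree_generate_map(&reftree, result, 1);
        space_reftree_destroy(&reftree);
}
#endif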
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 5df2f77e5780..74e36c0300f0 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,6582 +1,6588 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright (c) 2021, Klara Inc.
* Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
static uint_t zfs_embedded_slog_min_ms = 64;
/* default target for number of metaslabs per top-level vdev */
static uint_t zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
static uint_t zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
static uint_t zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
static uint_t zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
static unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit deadman "hung IO" events to this many per second.
*/
static unsigned int zfs_deadman_events_per_second = 1;
/*
* Rate limit direct write IO verify failures to this many per scond.
*/
static unsigned int zfs_dio_write_verify_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
static unsigned int zfs_checksum_events_per_second = 20;
/*
* Ignore errors during scrub/resilver. This allows working around
* resilver upon import when there are pool errors.
*/
static int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
/*
* Maximum and minimum ashift values that can be automatically set based on
* vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
* is higher than the maximum value, it is intentionally limited here to not
* excessively impact pool space efficiency. Higher ashift values may still
* be forced by vdev logical ashift or by user via ashift property, but won't
* be set automatically as a performance optimization.
*/
uint_t zfs_vdev_max_auto_ashift = 14;
uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
/*
* VDEV checksum verification for Direct I/O writes. This is necessary for
* Linux, because anonymous pages cannot be placed under write protection
* during Direct I/O writes.
*/
#if !defined(__FreeBSD__)
uint_t zfs_vdev_direct_write_verify = 1;
#else
uint_t zfs_vdev_direct_write_verify = 0;
#endif
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static vdev_ops_t *const vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, *const *opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
void
-vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs, range_seg64_t *remain_rs)
+vdev_default_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
+ zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
(void) vd, (void) remain_rs;
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize_txg(vd->vdev_child[c], psize, txg);
asize = MAX(asize, csize);
}
return (asize);
}
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded down to the nearest metaslab boundary. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
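/*
* For example, with vdev_ms_shift == 29 (512M metaslabs), a top-level
* asize of 10 GiB + 300 MiB is rounded down to 10 GiB (the nearest 512M
* multiple) before being used as the minimum allocatable size.
*/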
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
*/
if (vd == vd->vdev_top)
return (P2ALIGN_TYPED(vd->vdev_asize, 1ULL << vd->vdev_ms_shift,
uint64_t));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
static int
vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
uint64_t objid;
int err;
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (EINVAL);
}
err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
sizeof (uint64_t), 1, value);
if (err == ENOENT)
*value = vdev_prop_default_numeric(prop);
return (err);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
memcpy(newchild, pvd->vdev_child, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_load_guid();
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
- vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
- 0, 0);
+ vd->vdev_obsolete_segments = zfs_range_tree_create(NULL,
+ ZFS_RANGE_SEG64, NULL, 0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_deadman_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_dio_verify_rl,
&zfs_dio_write_verify_events_per_second, 1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
/*
* Default Thresholds for tuning ZED
*/
vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
vd->vdev_slow_io_n = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_N);
vd->vdev_slow_io_t = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_T);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
- vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
- 0);
+ vd->vdev_dtl[t] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
const char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
const char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
const char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
vd->vdev_path = spa_strdup(tmp);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
vd->vdev_devid = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
vd->vdev_physpath = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&tmp) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
vd->vdev_fru = spa_strdup(tmp);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement. Ignore pool ashift for vdev
* attach case.
*/
if (alloctype != VDEV_ALLOC_ATTACH) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
&vd->vdev_ashift);
} else {
vd->vdev_attaching = B_TRUE;
}
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
if (vd->vdev_ops == &vdev_root_ops &&
(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
&vd->vdev_root_zap);
}
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
&vd->vdev_noalloc);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
vd->vdev_rz_expanding = nvlist_exists(nv,
ZPOOL_CONFIG_RAIDZ_EXPANDING);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
const char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
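/*
 * For illustration (hypothetical config): the nvlist consumed above for a
 * leaf vdev typically carries at least ZPOOL_CONFIG_TYPE, ZPOOL_CONFIG_GUID
 * and ZPOOL_CONFIG_PATH; the optional keys looked up here
 * (ZPOOL_CONFIG_DEVID, ZPOOL_CONFIG_WHOLE_DISK, ZPOOL_CONFIG_ASHIFT, ...)
 * are simply absent when unknown, and a top-level special or dedup vdev
 * additionally carries ZPOOL_CONFIG_ALLOCATION_BIAS.
 */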
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
- range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
- range_tree_destroy(vd->vdev_dtl[t]);
+ zfs_range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
+ zfs_range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
- range_tree_destroy(vd->vdev_obsolete_segments);
+ zfs_range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_autotrim_kick_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_dio_verify_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_noalloc = svd->vdev_noalloc;
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
- range_tree_swap(&svd->vdev_obsolete_segments,
+ zfs_range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_noalloc = 0;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
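/*
 * For illustration (hypothetical device name): attaching a new device to a
 * plain leaf 'ada0' splices a mirror vdev in here, turning
 * root -> ada0 into root -> mirror-0 -> ada0; the caller then adds the new
 * device as a second child of mirror-0. A resilvering replacement uses
 * vdev_replacing_ops the same way.
 */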
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If pool not set for autoexpand, we need to also preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
/*
* Choose GCD for spa_gcd_alloc.
*/
static uint64_t
vdev_gcd(uint64_t a, uint64_t b)
{
while (b != 0) {
uint64_t t = b;
b = a % b;
a = t;
}
return (a);
}
/*
* Set spa_min_alloc and spa_gcd_alloc.
*/
static void
vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
{
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
if (spa->spa_gcd_alloc == INT_MAX) {
spa->spa_gcd_alloc = min_alloc;
} else {
spa->spa_gcd_alloc = vdev_gcd(min_alloc,
spa->spa_gcd_alloc);
}
}
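/*
 * Worked example (hypothetical sizes): the first top-level vdev with a
 * minimum allocation of 4096 finds spa_gcd_alloc at its INT_MAX sentinel
 * and sets both values to 4096; adding a second top-level vdev with a
 * minimum allocation of 6144 leaves spa_min_alloc at 4096 but reduces
 * spa_gcd_alloc to vdev_gcd(6144, 4096) == 2048.
 */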
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* metaslab_group_create was delayed until allocation bias was available
*/
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
* The spa ashift min/max only apply for the normal metaslab
* class. Class assignment is late-binding, so setting the ashift
* boundaries had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
vdev_spa_set_alloc(spa, min_alloc);
}
}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
* group.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is marked as non-allocating then don't
* activate the metaslabs since we want to ensure that
* no allocations are performed on this device.
*/
if (vd->vdev_noalloc) {
/* track non-allocating vdev space */
spa->spa_nonallocating_dspace += spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
} else if (!expanding) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
* Even though we close the space map, we need to set its
* pointer to NULL. The reason is that vdev_metaslab_fini()
* may be called multiple times for certain operations
* (e.g. when destroying a pool), so we need to ensure that
* this clause never executes twice. This logic is similar
* to the one used for the vdev_ms clause below.
*/
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
- for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
boolean_t vps_zio_done_probe;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
vdev_dbgmsg(vd, "probe done, cant_read=%u cant_write=%u",
vd->vdev_cant_read, vd->vdev_cant_write);
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
/*
* If this probe was initiated from the zio pipeline, then
* change the state in a spa_async_request. Probes that
* were initiated from a vdev_open can change the state
* as part of the open call.
*/
if (vps->vps_zio_done_probe) {
vd->vdev_fault_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_FAULT_VDEV);
}
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
vps->vps_zio_done_probe = (zio != NULL);
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
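/*
 * Concretely, with VDEV_LABELS == 4 the loop above reads the VDEV_PAD_SIZE
 * vl_be region of labels 1-3 only, leaving label 0 untouched in case it
 * holds a VTOC as noted above; vdev_probe_done() then rewrites each region
 * that was read successfully when the pool is writeable.
 */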
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
(void) vd;
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
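/*
 * The single-threaded fallback above matters because opening a zvol-backed
 * child re-enters ZFS and may need spa_namespace_lock, which the caller of
 * vdev_open_children() can already hold; dispatching such an open to the
 * taskq while this thread blocks in taskq_wait() could therefore deadlock.
 */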
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
* Compute the raidz-deflation ratio. Note, we hard-code 128k (1 << 17)
* because it is the "typical" blocksize. Even though SPA_MAXBLOCKSIZE
* changed, this algorithm cannot change; otherwise it would inconsistently
* account for existing bp's. We also hard-code txg 0 for the same reason
* since expanded RAIDZ vdevs can use a different asize for different birth
* txg's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize_txg(vd, 1 << 17, 0) >>
SPA_MINBLOCKSHIFT);
}
}
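/*
 * Worked example: on a top-level vdev where a 128K psize maps to a 128K
 * asize (e.g. a plain disk or mirror), the ratio is
 * 131072 / (131072 >> SPA_MINBLOCKSHIFT) = 131072 / 256 = 512; a RAIDZ
 * top-level vdev returns a larger asize for the same 128K block, so its
 * deflate ratio comes out smaller.
 */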
/*
* Choose the best of two ashifts, preferring one between logical ashift
* (absolute minimum) and administrator defined maximum, otherwise take
* the biggest of the two.
*/
uint64_t
vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
{
if (a > logical && a <= zfs_vdev_max_auto_ashift) {
if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (a);
else
return (MAX(a, b));
} else if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (MAX(a, b));
return (b);
}
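/*
 * For example, with logical == 9 and zfs_vdev_max_auto_ashift == 14
 * (illustrative values): vdev_best_ashift(9, 12, 13) returns 13 since both
 * candidates are in range and the larger wins; vdev_best_ashift(9, 12, 16)
 * returns 12 because 16 exceeds the administrator limit; and
 * vdev_best_ashift(9, 9, 12) returns 12 because 9 is not above the logical
 * minimum.
 */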
/*
* Maximize performance by inflating the configured ashift for top level
* vdevs to be as close to the physical ashift as possible while maintaining
* administrator defined limits and ensuring it doesn't go below the
* logical ashift.
*/
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
if (vd->vdev_ashift < vd->vdev_physical_ashift &&
vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
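/*
 * For example (illustrative values): a top-level vdev reporting logical
 * ashift 9 and physical ashift 12, with zfs_vdev_min_auto_ashift == 9 and
 * zfs_vdev_max_auto_ashift == 14, enters here with vdev_ashift == 9 and
 * leaves with MIN(MAX(14, 9), MAX(9, 12)) == 12, i.e. 4K-aligned
 * allocations on a 512e drive.
 */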
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_fault_wanted = B_FALSE;
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/* Keep the device in removed state if unplugged */
if (error == ENOENT && vd->vdev_removed) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
VDEV_AUX_NONE);
return (error);
}
/*
* Physical volume size should never be larger than its max size, unless
* the disk has shrunk while we were reading it or the device is buggy
* or damaged: either way it's not safe for use, so bail out of the open.
*/
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN_TYPED(osize, sizeof (vdev_label_t), uint64_t);
max_osize = P2ALIGN_TYPED(max_osize, sizeof (vdev_label_t), uint64_t);
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3} during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
* If the vdev_ashift was not overridden at creation time
* (0) or the override value is impossible for the device,
* then set it to the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift < vd->vdev_logical_ashift) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE)
vdev_ashift_optimize(vd);
vd->vdev_attaching = B_FALSE;
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Make sure the alignment required hasn't increased.
*/
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled,
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize, e.g. esize, are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
vdev_spa_set_alloc(spa, min_alloc);
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
* If the config lock is not held, do not check the txg: spa_sync could
* be updating the vdev's label before updating spa_last_synced_txg.
*/
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
static void
vdev_update_path(const char *prefix, char *svd, char **dvd, uint64_t guid)
{
if (svd != NULL && *dvd != NULL) {
if (strcmp(svd, *dvd) != 0) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: %s changed "
"from '%s' to '%s'", (u_longlong_t)guid, prefix,
*dvd, svd);
spa_strfree(*dvd);
*dvd = spa_strdup(svd);
}
} else if (svd != NULL) {
*dvd = spa_strdup(svd);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
(u_longlong_t)guid, *dvd);
}
}
static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
char *old, *new;
vdev_update_path("vdev_path", svd->vdev_path, &dvd->vdev_path,
dvd->vdev_guid);
vdev_update_path("vdev_devid", svd->vdev_devid, &dvd->vdev_devid,
dvd->vdev_guid);
vdev_update_path("vdev_physpath", svd->vdev_physpath,
&dvd->vdev_physpath, dvd->vdev_guid);
/*
* Our enclosure sysfs path may have changed between imports
*/
old = dvd->vdev_enc_sysfs_path;
new = svd->vdev_enc_sysfs_path;
if ((old != NULL && new == NULL) ||
(old == NULL && new != NULL) ||
((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
"changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
old, new);
if (dvd->vdev_enc_sysfs_path)
spa_strfree(dvd->vdev_enc_sysfs_path);
if (svd->vdev_enc_sysfs_path) {
dvd->vdev_enc_sysfs_path = spa_strdup(
svd->vdev_enc_sysfs_path);
} else {
dvd->vdev_enc_sysfs_path = NULL;
}
}
}
/*
* Recursively copy vdev paths from one vdev to another. The source and
* destination vdev trees must have the same geometry, otherwise an error is
* returned. Intended to copy paths from the userland config into the MOS config.
*/
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
* The idea here is that while a vdev can shift positions within
* a top vdev (when replacing, attaching mirror, etc.) it cannot
* step outside of it.
*/
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
* Recursively copy vdev paths from one root vdev to another. Source and
* destination vdev trees may differ in geometry. For each destination leaf
* vdev, search a vdev with the same guid and top vdev id in the source.
* Intended to copy paths from userland config into MOS config.
*/
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* In case the vdev is present we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
/*
* Recheck if resilver is still needed and cancel any
* scheduled resilver if resilver is unneeded.
*/
if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
mutex_exit(&spa->spa_async_lock);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated, making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
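/*
 * Worked example: a 100 GiB vdev gives ms_count = 100 GiB >> 29 = 200,
 * which is neither below zfs_vdev_min_ms_count nor above
 * zfs_vdev_default_ms_count, so ms_shift stays at
 * zfs_vdev_default_ms_shift (29) and the vdev ends up with 200 metaslabs
 * of 512 MiB each, matching the table above.
 */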
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in 'maxfaults' or more children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
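/*
 * For example (illustrative txg ranges): consider a raidz2 top-level vdev
 * (maxfaults == 2) with four children whose DTL_MISSING maps are
 * [100,200], [150,250], [300,400] and empty. Only txgs 150-200 are missing
 * from two or more children, so the parent's DTL_MISSING is [150,200],
 * while its DTL_PARTIAL is the union [100,250] plus [300,400].
 */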
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
- range_tree_t *rt = vd->vdev_dtl[t];
+ zfs_range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
- if (!range_tree_contains(rt, txg, size))
- range_tree_add(rt, txg, size);
+ if (!zfs_range_tree_contains(rt, txg, size))
+ zfs_range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
- range_tree_t *rt = vd->vdev_dtl[t];
+ zfs_range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem but it can result in devices being tried
* which are known to not have the data, in which case the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
- if (!range_tree_is_empty(rt))
- dirty = range_tree_contains(rt, txg, size);
+ if (!zfs_range_tree_is_empty(rt))
+ dirty = zfs_range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
- range_tree_t *rt = vd->vdev_dtl[t];
+ zfs_range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
- empty = range_tree_is_empty(rt);
+ empty = zfs_range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
(void) dva, (void) psize;
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
- ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
+ ASSERT3U(zfs_range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
- return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
+ return (zfs_range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
- ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
+ ASSERT3U(zfs_range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
- return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
+ return (zfs_range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
- if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
+ if (zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error then all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]),
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
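/*
 * For example (illustrative numbers): if the scan covers txgs up to
 * scn_max_txg == 1000 and this vdev's highest missing txg is 800, the
 * whole DTL was covered by the scan and may be excised. If the vdev
 * accrued missing txgs beyond 1000 (say it went offline again during
 * the resilver), its DTL max falls outside the scan and the entries
 * are left intact.
 */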
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
static void
vdev_dtl_reassess_impl(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done, boolean_t faulting)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess_impl(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done, faulting);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
- !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
+ !zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
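*
* A worked example with illustrative numbers: suppose scrub_txg = 500,
* DTL_MISSING = {100, 400, 600} and DTL_SCRUB = {400}. The reference
* counts come out as 100 -> 1 - 1 = 0 (excised), 400 -> 1 - 1 + 2 = 2
* (kept), and 600 -> 1 (outside [0, scrub_txg), kept), so the
* regenerated DTL_MISSING is {400, 600}.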
*/
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
- if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
+ if (!zfs_range_tree_is_empty(
+ vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
- range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
- range_tree_walk(vd->vdev_dtl[DTL_MISSING],
- range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
+ zfs_range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
+ zfs_range_tree_walk(vd->vdev_dtl[DTL_MISSING],
+ zfs_range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
- range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
- range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
+ zfs_range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL,
+ NULL);
+ zfs_range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
/*
* For the faulting case, treat members of a replacing vdev
* as if they are not available. It's more likely than not that
* a vdev in a replacing vdev could encounter read errors so
* treat it as not being able to contribute.
*/
if (!vdev_readable(vd) ||
(faulting && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_replacing_ops)) {
- range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
+ zfs_range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
} else {
- range_tree_walk(vd->vdev_dtl[DTL_MISSING],
- range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
+ zfs_range_tree_walk(vd->vdev_dtl[DTL_MISSING],
+ zfs_range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
}
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
- range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
- range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
+ zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
+ zfs_range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
} else {
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB) {
/* leaf vdevs only */
continue;
}
if (t == DTL_PARTIAL) {
/* i.e. non-zero */
minref = 1;
} else if (vdev_get_nparity(vd) != 0) {
/* RAIDZ, DRAID */
minref = vdev_get_nparity(vd) + 1;
} else {
/* any kind of mirror */
minref = vd->vdev_children;
}
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree,
cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree,
vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
if (vd->vdev_top->vdev_ops == &vdev_raidz_ops) {
raidz_dtl_reassessed(vd);
}
}
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
return (vdev_dtl_reassess_impl(vd, txg, scrub_txg, scrub_done,
rebuild_done, B_FALSE));
}
/*
* Iterate over all the vdevs except spare, and post kobj events
*/
void
vdev_post_kobj_evt(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_kobj_evt_post &&
vd->vdev_kobj_flag == B_FALSE) {
vd->vdev_kobj_flag = B_TRUE;
vd->vdev_ops->vdev_op_kobj_evt_post(vd);
}
for (int c = 0; c < vd->vdev_children; c++)
vdev_post_kobj_evt(vd->vdev_child[c]);
}
/*
* Iterate over all the vdevs except spare, and clear kobj events
*/
void
vdev_clear_kobj_evt(vdev_t *vd)
{
vd->vdev_kobj_flag = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear_kobj_evt(vd->vdev_child[c]);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
- range_tree_t *rt;
+ zfs_range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
/*
* If the dtl cannot be sync'd there is no need to open it.
*/
if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
return (0);
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
- rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
- range_tree_walk(rt, range_tree_add,
+ zfs_range_tree_walk(rt, zfs_range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
- range_tree_vacate(rt, NULL, NULL);
- range_tree_destroy(rt);
+ zfs_range_tree_vacate(rt, NULL, NULL);
+ zfs_range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
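/*
 * Usage sketch of the renamed range tree API exercised above
 * (illustrative only; 'other_rt' is a placeholder destination tree):
 *
 * zfs_range_tree_t *rt =
 *     zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
 * zfs_range_tree_add(rt, txg, 1);
 * if (zfs_range_tree_contains(rt, txg, 1))
 *     zfs_range_tree_walk(rt, zfs_range_tree_add, other_rt);
 * zfs_range_tree_vacate(rt, NULL, NULL);
 * zfs_range_tree_destroy(rt);
 */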
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
- range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
+ zfs_range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
- range_tree_t *rtsync;
+ zfs_range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
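/*
 * Snapshot DTL_MISSING into a scratch tree while holding vdev_dtl_lock;
 * the space map truncate and write below then operate on the copy
 * without holding the lock across the I/O.
 */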
- rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ rtsync = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
- range_tree_walk(rt, range_tree_add, rtsync);
+ zfs_range_tree_walk(rt, zfs_range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
- range_tree_vacate(rtsync, NULL, NULL);
+ zfs_range_tree_vacate(rtsync, NULL, NULL);
- range_tree_destroy(rtsync);
+ zfs_range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be
* - offlined
* - detached
* - removed
* - faulted
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
boolean_t faulting = vd->vdev_state == VDEV_STATE_FAULTED;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess_impl(tvd, 0, 0, B_FALSE, B_FALSE, faulting);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess_impl(tvd, 0, 0, B_FALSE, B_FALSE, faulting);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
- if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
+ if (!zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
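/*
 * For example (illustrative numbers): a mirror whose children report
 * missing txg ranges [100, 200] and [150, 300] needs a resilver over
 * the combined range [100, 300], i.e. the MIN of the children's minima
 * and the MAX of their maxima as computed above.
 */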
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
if (vd->vdev_ops == &vdev_raidz_ops) {
error = vdev_raidz_load(vd);
if (error != 0)
return (error);
}
/*
* On spa_load path, grab the allocation bias from our zap
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
}
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
uint64_t failfast;
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
1, &failfast);
if (error == 0) {
vd->vdev_failfast = failfast & 1;
} else if (error == ENOENT) {
vd->vdev_failfast = vdev_prop_default_numeric(
VDEV_PROP_FAILFAST);
} else {
vdev_dbgmsg(vd,
"vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
uint64_t zapobj;
if (vd->vdev_top_zap != 0)
zapobj = vd->vdev_top_zap;
else
zapobj = vd->vdev_leaf_zap;
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
&vd->vdev_checksum_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
&vd->vdev_checksum_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
&vd->vdev_io_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
&vd->vdev_io_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_N,
&vd->vdev_slow_io_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_SLOW_IO_T,
&vd->vdev_slow_io_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
- if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
+ if (zfs_range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
/*
* Return the amount of space that should be (or was) allocated for the given
* psize (compressed block size) in the given TXG. Note that for expanded
* RAIDZ vdevs, the size allocated for older BP's may be larger. See
* vdev_raidz_asize().
*/
uint64_t
vdev_psize_to_asize_txg(vdev_t *vd, uint64_t psize, uint64_t txg)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize, txg));
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vdev_psize_to_asize_txg(vd, psize, 0));
}
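/*
 * For a plain disk or mirror leaf the asize is simply psize rounded up
 * to the vdev's allocation granularity, e.g. a 5 KiB psize on an
 * ashift=12 vdev allocates 8 KiB. RAID-Z additionally charges parity
 * and padding sectors, which is why the txg matters after expansion
 * (see vdev_raidz_asize(), as noted above).
 */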
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If the user did a 'zpool offline -f' then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_remove_wanted(spa_t *spa, uint64_t guid)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
/*
* If the vdev is already removed, or expanding, which can trigger
* repartition add/remove events, then don't do anything.
*/
if (vd->vdev_removed || vd->vdev_expanding)
return (spa_vdev_state_exit(spa, NULL, 0));
/*
* Confirm the vdev has been removed, otherwise don't do anything.
*/
if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_REMOVE);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa->spa_ccw_fail_time = 0;
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here. This is triggered by l2arc_rebuild_vdev()
* asynchronously for the whole device or in l2arc_evict() as it evicts
* space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED)) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
/*
* Asynchronously detach spare vdev if resilver or
* rebuild is not required
*/
if (vd->vdev_unspare &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
!vdev_rebuild_active(tvd))
spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_dio_verify_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect or removed vdev.
*/
if (!vdev_is_concrete(vd) || vd->vdev_removed)
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we still want to post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
(void) cvd;
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
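/*
 * For example, a 512-byte sector vdev (ashift == 9) gets 9 + 47 == 56
 * bits of offset, so it stays single-word addressable while its asize
 * is below 2^56 bytes (64 PiB); an ashift of 12 raises that limit to
 * 2^59 bytes.
 */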
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t];
vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
memcpy(vs, &vd->vdev_stat, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_pspace = vd->vdev_psize;
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN_TYPED(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift, uint64_t);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
if (vd->vdev_physical_ashift <= ASHIFT_MAX)
vs->vs_physical_ashift = vd->vdev_physical_ashift;
else
vs->vs_physical_ashift = 0;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
vs->vs_noalloc = MAX(vd->vdev_noalloc,
tvd ? tvd->vdev_noalloc : 0);
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
/* Suppress ASAN false positive */
#ifdef __SANITIZE_ADDRESS__
vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
#else
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
#endif
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_FLUSH. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_FLUSH;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting, use the priority to categorize the IO.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM,
* ZIO_PRIORITY_REBUILD.
*/
if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
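/*
 * The deflation works in SPA_MINBLOCKSIZE (512-byte) units: the asize
 * delta is converted to 512-byte sectors and scaled by
 * vdev_deflate_ratio (derived in vdev_set_deflate_ratio()). For a
 * top-level vdev with no RAID-Z expansion the ratio leaves the value
 * unchanged; for RAID-Z it discounts the parity overhead.
 */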
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
(void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
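/*
* Illustrative sketch (editor's addition, not part of the upstream
* source): one way a non-sync-thread caller could satisfy the SCL_CONFIG
* locking requirement documented above.  In practice callers usually
* already hold the lock via their own paths; the helper name is
* hypothetical.
*/
#if 0
static void
example_mark_config_dirty(spa_t *spa, vdev_t *tvd)
{
	/* Take SCL_CONFIG as writer around the dirty-list update. */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
	vdev_config_dirty(tvd);
	spa_config_exit(spa, SCL_CONFIG, FTAG);
}
#endif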
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since vdev_offline() code path is already in an offline
* state we can miss a statechange event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also let this be if
* it is one of our special test online cases, which is only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
*/
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
if ((vd->vdev_spa->spa_raidz_expand == NULL ||
vd->vdev_spa->spa_raidz_expand->vre_vdev_id != vd->vdev_id) &&
(vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
VERIFY3U(pvd->vdev_children, >, 1);
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
ASSERT3P(pvd->vdev_child, !=, NULL);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, const char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (vq->vq_active > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %u active IOs",
vd->vdev_path, vq->vq_active);
/*
* Look at the head of all the pending queues;
* if any I/O has been outstanding for longer than
* spa_deadman_synctime, invoke the deadman logic.
*/
fio = list_head(&vq->vq_active_list);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
-vdev_xlate_is_empty(range_seg64_t *rs)
+vdev_xlate_is_empty(zfs_range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
*/
void
-vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs, range_seg64_t *remain_rs)
+vdev_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
+ zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev, initialize the physical
* range to the logical range and set an empty remaining
* range then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
- range_seg64_t intermediate = { 0 };
+ zfs_range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
-vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
+vdev_xlate_walk(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
- range_seg64_t iter_rs = *logical_rs;
- range_seg64_t physical_rs;
- range_seg64_t remain_rs;
+ zfs_range_seg64_t iter_rs = *logical_rs;
+ zfs_range_seg64_t physical_rs;
+ zfs_range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
* does not live on this leaf vdev. Only call the provided
* function when the translated physical size is non-zero.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
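/*
* Illustrative sketch (editor's addition, not part of the upstream
* source): a minimal vdev_xlate_walk() caller whose callback sums the
* translated physical sizes.  Locking and the usual caller context are
* omitted, and the example_* names are hypothetical.
*/
#if 0
static void
example_xlate_sum_cb(void *arg, zfs_range_seg64_t *physical_rs)
{
	uint64_t *total = arg;

	*total += physical_rs->rs_end - physical_rs->rs_start;
}

static uint64_t
example_xlate_total(vdev_t *vd, uint64_t offset, uint64_t size)
{
	zfs_range_seg64_t logical_rs = {
		.rs_start = offset,
		.rs_end = offset + size,
	};
	uint64_t total = 0;

	vdev_xlate_walk(vd, &logical_rs, example_xlate_sum_cb, &total);
	return (total);
}
#endif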
static char *
vdev_name(vdev_t *vd, char *buf, int buflen)
{
if (vd->vdev_path == NULL) {
if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
strlcpy(buf, vd->vdev_spa->spa_name, buflen);
} else if (!vd->vdev_ops->vdev_op_leaf) {
snprintf(buf, buflen, "%s-%llu",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id);
}
} else {
strlcpy(buf, vd->vdev_path, buflen);
}
return (buf);
}
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
uint64_t intval, zprop_source_t src)
{
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
static void
vdev_props_set_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd;
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
uint64_t objid;
nvlist_t *nvprops;
vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
/* this vdev could get removed while waiting for this sync task */
if (vd == NULL)
return;
/*
* Set vdev property values in the vdev props mos object.
*/
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
panic("unexpected vdev type");
}
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
uint64_t intval;
const char *strval;
vdev_prop_t prop;
const char *propname = nvpair_name(elem);
zprop_type_t proptype;
switch (prop = vdev_name_to_prop(propname)) {
case VDEV_PROP_USERPROP:
if (vdev_prop_user(propname)) {
strval = fnvpair_value_string(elem);
if (strlen(strval) == 0) {
/* remove the property if value == "" */
(void) zap_remove(mos, objid, propname,
tx);
} else {
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
}
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
}
break;
default:
/* normalize the property name */
propname = vdev_prop_to_name(prop);
proptype = vdev_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(vdev_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos, objid, propname,
sizeof (uint64_t), 1, &intval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%lld",
(u_longlong_t)vdev_guid,
nvpair_name(elem), (longlong_t)intval);
} else {
panic("invalid vdev property type %u",
nvpair_type(elem));
}
}
}
mutex_exit(&spa->spa_props_lock);
}
int
vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
int error = 0;
ASSERT(vd != NULL);
/* Check that vdev has a zap we can use */
if (vd->vdev_root_zap == 0 &&
vd->vdev_top_zap == 0 &&
vd->vdev_leaf_zap == 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
&nvprops) != 0)
return (SET_ERROR(EINVAL));
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
return (SET_ERROR(EINVAL));
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
const char *propname = nvpair_name(elem);
vdev_prop_t prop = vdev_name_to_prop(propname);
uint64_t intval = 0;
const char *strval = NULL;
if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
error = EINVAL;
goto end;
}
if (prop != VDEV_PROP_USERPROP && vdev_prop_readonly(prop)) {
error = EROFS;
goto end;
}
/* Special Processing */
switch (prop) {
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL) {
error = EROFS;
break;
}
if (nvpair_value_string(elem, &strval) != 0) {
error = EINVAL;
break;
}
/* New path must start with /dev/ */
if (strncmp(strval, "/dev/", 5)) {
error = EINVAL;
break;
}
error = spa_vdev_setpath(spa, vdev_guid, strval);
break;
case VDEV_PROP_ALLOCATING:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
if (intval != vd->vdev_noalloc)
break;
if (intval == 0)
error = spa_vdev_noalloc(spa, vdev_guid);
else
error = spa_vdev_alloc(spa, vdev_guid);
break;
case VDEV_PROP_FAILFAST:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_failfast = intval & 1;
break;
case VDEV_PROP_CHECKSUM_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_n = intval;
break;
case VDEV_PROP_CHECKSUM_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_t = intval;
break;
case VDEV_PROP_IO_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_n = intval;
break;
case VDEV_PROP_IO_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_t = intval;
break;
case VDEV_PROP_SLOW_IO_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_slow_io_n = intval;
break;
case VDEV_PROP_SLOW_IO_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_slow_io_t = intval;
break;
default:
/* Most processing is done in vdev_props_set_sync */
break;
}
end:
if (error != 0) {
intval = error;
vdev_prop_add_list(outnvl, propname, strval, intval, 0);
return (error);
}
}
return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
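/*
* Illustrative sketch (editor's addition, not part of the upstream
* source): the shape of the innvl that vdev_prop_set() consumes -- the
* target vdev guid under ZPOOL_VDEV_PROPS_SET_VDEV and the properties to
* apply under ZPOOL_VDEV_PROPS_SET_PROPS.  In practice this nvlist is
* assembled by the user-space ioctl path; the helper below is a
* hypothetical construction for illustration only.
*/
#if 0
static nvlist_t *
example_vdev_prop_set_args(uint64_t vdev_guid)
{
	nvlist_t *props = fnvlist_alloc();
	nvlist_t *innvl = fnvlist_alloc();

	/* e.g. disable failfast on the selected vdev */
	fnvlist_add_uint64(props, "failfast", 0);

	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
	fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
	fnvlist_free(props);

	return (innvl);
}
#endif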
int
vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int err = 0;
uint64_t objid;
uint64_t vdev_guid;
nvpair_t *elem = NULL;
nvlist_t *nvprops = NULL;
uint64_t intval = 0;
char *strval = NULL;
const char *propname = NULL;
vdev_prop_t prop;
ASSERT(vd != NULL);
ASSERT(mos != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (SET_ERROR(EINVAL));
}
ASSERT(objid != 0);
mutex_enter(&spa->spa_props_lock);
if (nvprops != NULL) {
char namebuf[64] = { 0 };
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
intval = 0;
strval = NULL;
propname = nvpair_name(elem);
prop = vdev_name_to_prop(propname);
zprop_source_t src = ZPROP_SRC_DEFAULT;
uint64_t integer_size, num_integers;
switch (prop) {
/* Special Read-only Properties */
case VDEV_PROP_NAME:
strval = vdev_name(vd, namebuf,
sizeof (namebuf));
if (strval == NULL)
continue;
vdev_prop_add_list(outnvl, propname, strval, 0,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CAPACITY:
/* percent used */
intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
(vd->vdev_stat.vs_alloc * 100 /
vd->vdev_stat.vs_dspace);
vdev_prop_add_list(outnvl, propname, NULL,
intval, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_STATE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_state, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_GUID:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_guid, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_asize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PSIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_psize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASHIFT:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_ashift, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_SIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace -
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ALLOCATED:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_EXPANDSZ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRAGMENTATION:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_fragmentation,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARITY:
vdev_prop_add_list(outnvl, propname, NULL,
vdev_get_nparity(vd), ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_DEVID:
if (vd->vdev_devid == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_devid, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PHYS_PATH:
if (vd->vdev_physpath == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_physpath, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ENC_PATH:
if (vd->vdev_enc_sysfs_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRU:
if (vd->vdev_fru == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_fru, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARENT:
if (vd->vdev_parent != NULL) {
strval = vdev_name(vd->vdev_parent,
namebuf, sizeof (namebuf));
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
}
continue;
case VDEV_PROP_CHILDREN:
if (vd->vdev_children > 0)
strval = kmem_zalloc(ZAP_MAXVALUELEN,
KM_SLEEP);
for (uint64_t i = 0; i < vd->vdev_children;
i++) {
const char *vname;
vname = vdev_name(vd->vdev_child[i],
namebuf, sizeof (namebuf));
if (vname == NULL)
vname = "(unknown)";
if (strlen(strval) > 0)
strlcat(strval, ",",
ZAP_MAXVALUELEN);
strlcat(strval, vname, ZAP_MAXVALUELEN);
}
if (strval != NULL) {
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
kmem_free(strval, ZAP_MAXVALUELEN);
}
continue;
case VDEV_PROP_NUMCHILDREN:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_children, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_READ_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_read_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_WRITE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_write_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CHECKSUM_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_checksum_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_INITIALIZE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_initialize_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_TRIM_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_trim_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_SLOW_IOS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_slow_ios,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_FLUSH. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_FLUSH],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_FLUSH. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_FLUSH],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_REMOVING:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_removing, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_RAIDZ_EXPANDING:
/* Only expose this for raidz */
if (vd->vdev_ops == &vdev_raidz_ops) {
vdev_prop_add_list(outnvl, propname,
NULL, vd->vdev_rz_expanding,
ZPROP_SRC_NONE);
}
continue;
case VDEV_PROP_TRIM_SUPPORT:
/* only valid for leaf vdevs */
if (vd->vdev_ops->vdev_op_leaf) {
vdev_prop_add_list(outnvl, propname,
NULL, vd->vdev_has_trim,
ZPROP_SRC_NONE);
}
continue;
/* Numeric Properties */
case VDEV_PROP_ALLOCATING:
/* Leaf vdevs cannot have this property */
if (vd->vdev_mg == NULL &&
vd->vdev_top != NULL) {
src = ZPROP_SRC_NONE;
intval = ZPROP_BOOLEAN_NA;
} else {
err = vdev_prop_get_int(vd, prop,
&intval);
if (err && err != ENOENT)
break;
if (intval ==
vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
}
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
case VDEV_PROP_FAILFAST:
src = ZPROP_SRC_LOCAL;
strval = NULL;
err = zap_lookup(mos, objid, nvpair_name(elem),
sizeof (uint64_t), 1, &intval);
if (err == ENOENT) {
intval = vdev_prop_default_numeric(
prop);
err = 0;
} else if (err) {
break;
}
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
case VDEV_PROP_IO_T:
case VDEV_PROP_SLOW_IO_N:
case VDEV_PROP_SLOW_IO_T:
err = vdev_prop_get_int(vd, prop, &intval);
if (err && err != ENOENT)
break;
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
/* Text Properties */
case VDEV_PROP_COMMENT:
/* Exists in the ZAP below */
/* FALLTHRU */
case VDEV_PROP_USERPROP:
/* User Properties */
src = ZPROP_SRC_LOCAL;
err = zap_length(mos, objid, nvpair_name(elem),
&integer_size, &num_integers);
if (err)
break;
switch (integer_size) {
case 8:
/* User properties cannot be integers */
err = EINVAL;
break;
case 1:
/* string property */
strval = kmem_alloc(num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid,
nvpair_name(elem), 1,
num_integers, strval);
if (err) {
kmem_free(strval,
num_integers);
break;
}
vdev_prop_add_list(outnvl, propname,
strval, 0, src);
kmem_free(strval, num_integers);
break;
}
break;
default:
err = ENOENT;
break;
}
if (err)
break;
}
} else {
/*
* Get all properties from the MOS vdev property object.
*/
zap_cursor_t zc;
zap_attribute_t *za = zap_attribute_alloc();
for (zap_cursor_init(&zc, mos, objid);
(err = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
intval = 0;
strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
propname = za->za_name;
switch (za->za_integer_length) {
case 8:
/* We do not allow integer user properties */
/* This is likely an internal value */
break;
case 1:
/* string property */
strval = kmem_alloc(za->za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid, za->za_name, 1,
za->za_num_integers, strval);
if (err) {
kmem_free(strval, za->za_num_integers);
break;
}
vdev_prop_add_list(outnvl, propname, strval, 0,
src);
kmem_free(strval, za->za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
}
mutex_exit(&spa->spa_props_lock);
if (err && err != ENOENT) {
return (err);
}
return (0);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
"Default lower limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
"Default upper limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
ZFS_MODULE_PARAM(zfs, zfs_, deadman_events_per_second, UINT, ZMOD_RW,
"Rate limit hung IO (deadman) events to this many per second");
ZFS_MODULE_PARAM(zfs, zfs_, dio_write_verify_events_per_second, UINT, ZMOD_RW,
"Rate Direct I/O write verify events to this many per second");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, direct_write_verify, UINT, ZMOD_RW,
"Direct I/O writes will perform for checksum verification before "
"commiting write");
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below ZED threshold).");
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
+
+ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, raidz_impl,
+ param_set_raidz_impl, param_get_raidz_impl, ZMOD_RW,
+ "RAIDZ implementation");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_draid.c b/sys/contrib/openzfs/module/zfs/vdev_draid.c
index 419c8ac5bb28..45f8bcfbd4ed 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_draid.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_draid.c
@@ -1,2821 +1,2821 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_draid_io_verify() */
#endif
/*
* dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
* comprised of multiple raidz redundancy groups which are spread over the
* dRAID children. To ensure an even distribution, and avoid hot spots, a
* permutation mapping is applied to the order of the dRAID children.
* This mixing effectively distributes the parity columns evenly over all
* of the disks in the dRAID.
*
* This is beneficial because it means that when resilvering all of the
* disks can participate, thereby increasing the available IOPS and
* bandwidth. Furthermore, by reserving a small fraction of each child's
* total capacity, virtual distributed spare disks can be created. These
* spares similarly benefit from the performance gains of spanning all of
* the children. The consequence is that resilvering to a distributed
* spare can substantially reduce the time required to restore full parity
* to a pool with a failed disk.
*
* === dRAID group layout ===
*
* First, let's define a "row" in the configuration to be a 16M chunk from
* each physical drive at the same offset. This is the minimum allowable
* size since it must be possible to store a full 16M block when there is
* only a single data column. Next, we define a "group" to be a set of
* sequential disks containing both the parity and data columns. We allow
* groups to span multiple rows in order to align any group size to any
* number of physical drives. Finally, a "slice" is comprised of the rows
* which contain the target number of groups. The permutation mappings
* are applied in a round robin fashion to each slice.
*
* Given D+P drives in a group (including parity drives) and C-S physical
* drives (not including the spare drives), we can distribute the groups
* across R rows without remainder by selecting the least common multiple
* of D+P and C-S as the number of groups; i.e. ngroups = LCM(D+P, C-S).
*
* In the example below, there are C=14 physical drives in the configuration
* with S=2 drives worth of spare capacity. Each group has a width of 9
* which includes D=8 data and P=1 parity drive. There are 4 groups and
* 3 rows per slice. Each group has a size of 144M (16M * 9) and a slice
* has a size of 576M (144M * 4). When allocating from a dRAID each group
* is filled before moving on to the next, as shown in slice0 below (a
* short worked sketch of this arithmetic follows this comment block).
*
* data disks (8 data + 1 parity) spares (2)
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* ^ | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
* | +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | | group 0 | group 1..| |
* | +-----------------------------------+-----------+-------|
* | | 0 1 2 3 4 5 6 7 8 | 36 37 38| | r
* | | 9 10 11 12 13 14 15 16 17| 45 46 47| | o
* | | 18 19 20 21 22 23 24 25 26| 54 55 56| | w
* | 27 28 29 30 31 32 33 34 35| 63 64 65| | 0
* s +-----------------------+-----------------------+-------+
* l | ..group 1 | group 2.. | |
* i +-----------------------+-----------------------+-------+
* c | 39 40 41 42 43 44| 72 73 74 75 76 77| | r
* e | 48 49 50 51 52 53| 81 82 83 84 85 86| | o
* 0 | 57 58 59 60 61 62| 90 91 92 93 94 95| | w
* | 66 67 68 69 70 71| 99 100 101 102 103 104| | 1
* | +-----------+-----------+-----------------------+-------+
* | |..group 2 | group 3 | |
* | +-----------+-----------+-----------------------+-------+
* | | 78 79 80|108 109 110 111 112 113 114 115 116| | r
* | | 87 88 89|117 118 119 120 121 122 123 124 125| | o
* | | 96 97 98|126 127 128 129 130 131 132 133 134| | w
* v |105 106 107|135 136 137 138 139 140 141 142 143| | 2
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
* s +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* l | group 4 | group 5..| | row 3
* i +-----------------------+-----------+-----------+-------|
* c | ..group 5 | group 6.. | | row 4
* e +-----------+-----------+-----------------------+-------+
* 1 |..group 6 | group 7 | | row 5
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
* s +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* l | group 8 | group 9..| | row 6
* i +-----------------------------------------------+-------|
* c | ..group 9 | group 10.. | | row 7
* e +-----------------------+-----------------------+-------+
* 2 |..group 10 | group 11 | | row 8
* +-----------+-----------------------------------+-------+
*
* This layout has several advantages over requiring that each row contain
* a whole number of groups.
*
* 1. The group count is not a relevant parameter when defining a dRAID
* layout. Only the group width is needed, and *all* groups will have
* the desired size.
*
* 2. All possible group widths (<= physical disk count) can be supported.
*
* 3. The logic within vdev_draid.c is simplified when the group width is
* the same for all groups (although some of the logic around computing
* permutation numbers and drive offsets is more complicated).
*
* N.B. The following array describes all valid dRAID permutation maps.
* Each row is used to generate a permutation map for a different number
* of children from a unique seed. The seeds were generated and carefully
* evaluated by the 'draid' utility in order to provide balanced mappings.
* In addition to the seed a checksum of the in-memory mapping is stored
* for verification.
*
* The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
* with a given permutation map) is the ratio of the amounts of I/O that will
* be sent to the least and most busy disks when resilvering. The average
* imbalance ratio (of a given number of disks and permutation map) is the
* average of the ratios of all possible single and double disk failures.
*
* In order to achieve a low imbalance ratio the number of permutations in
* the mapping must be significantly larger than the number of children.
* For dRAID the number of permutations has been limited to 512 to minimize
* the map size. This does result in a gradually increasing imbalance ratio
* as seen in the table below. Increasing the number of permutations for
* larger child counts would reduce the imbalance ratio. However, in practice
* when there are a large number of children each child is responsible for
* fewer total IOs so it's less of a concern.
*
* Note these values are hard coded and must never be changed. Existing
* pools depend on the same mapping always being generated in order to
* read and write from the correct locations. Any change would make
* existing pools completely inaccessible.
*/
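/*
* Illustrative sketch (editor's addition, not part of the upstream
* source): the LCM relationship worked for the example configuration
* above (C=14 children, S=2 spares, D=8 data + P=1 parity).
* LCM(D+P, C-S) = LCM(9, 12) = 36 disk slots per slice, i.e. 4 groups of
* 9 and 3 rows of 12, matching the diagram.  The example_* helpers are
* hypothetical.
*/
#if 0
#include <stdint.h>

static uint64_t
example_gcd(uint64_t a, uint64_t b)
{
	while (b != 0) {
		uint64_t t = b;

		b = a % b;
		a = t;
	}
	return (a);
}

static void
example_draid_slice_shape(uint64_t groupwidth, uint64_t ndata,
    uint64_t *groups_per_slice, uint64_t *rows_per_slice)
{
	/* groupwidth = D+P, ndata = C-S */
	uint64_t lcm = groupwidth * ndata / example_gcd(groupwidth, ndata);

	*groups_per_slice = lcm / groupwidth;	/* 36 / 9  = 4 */
	*rows_per_slice = lcm / ndata;		/* 36 / 12 = 3 */
}
#endif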
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
{ 2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d }, /* 1.000 */
{ 3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 }, /* 1.000 */
{ 4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 }, /* 1.000 */
{ 5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 }, /* 1.010 */
{ 6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 }, /* 1.031 */
{ 7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee }, /* 1.043 */
{ 8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 }, /* 1.059 */
{ 9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 }, /* 1.056 */
{ 10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 }, /* 1.072 */
{ 11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c }, /* 1.083 */
{ 12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e }, /* 1.097 */
{ 13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 }, /* 1.100 */
{ 14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 }, /* 1.121 */
{ 15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 }, /* 1.103 */
{ 16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 }, /* 1.111 */
{ 17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe }, /* 1.133 */
{ 18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 }, /* 1.131 */
{ 19, 256, 0x892e343f2f31d690, 0x00000029eb392835 }, /* 1.130 */
{ 20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c }, /* 1.141 */
{ 21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 }, /* 1.139 */
{ 22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 }, /* 1.150 */
{ 23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f }, /* 1.174 */
{ 24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 }, /* 1.168 */
{ 25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 }, /* 1.180 */
{ 26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba }, /* 1.226 */
{ 27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 }, /* 1.228 */
{ 28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c }, /* 1.217 */
{ 29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c }, /* 1.239 */
{ 30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 }, /* 1.238 */
{ 31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f }, /* 1.273 */
{ 32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 }, /* 1.191 */
{ 33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 }, /* 1.199 */
{ 34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 }, /* 1.195 */
{ 35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 }, /* 1.201 */
{ 36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef }, /* 1.194 */
{ 37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 }, /* 1.237 */
{ 38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 }, /* 1.242 */
{ 39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd }, /* 1.231 */
{ 40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 }, /* 1.233 */
{ 41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 }, /* 1.271 */
{ 42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 }, /* 1.263 */
{ 43, 512, 0xbaa5125faa781854, 0x000001c76789e278 }, /* 1.270 */
{ 44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb }, /* 1.281 */
{ 45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 }, /* 1.282 */
{ 46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b }, /* 1.286 */
{ 47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 }, /* 1.329 */
{ 48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b }, /* 1.286 */
{ 49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 }, /* 1.322 */
{ 50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 }, /* 1.335 */
{ 51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 }, /* 1.305 */
{ 52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf }, /* 1.330 */
{ 53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 }, /* 1.365 */
{ 54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 }, /* 1.334 */
{ 55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 }, /* 1.364 */
{ 56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e }, /* 1.374 */
{ 57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 }, /* 1.363 */
{ 58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 }, /* 1.401 */
{ 59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c }, /* 1.392 */
{ 60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 }, /* 1.360 */
{ 61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd }, /* 1.396 */
{ 62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c }, /* 1.453 */
{ 63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 }, /* 1.437 */
{ 64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 }, /* 1.402 */
{ 65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 }, /* 1.459 */
{ 66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 }, /* 1.423 */
{ 67, 512, 0x910b9714f698a877, 0x00000451ea65d5db }, /* 1.447 */
{ 68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 }, /* 1.450 */
{ 69, 512, 0x836d4968fbaa3706, 0x000004954068a380 }, /* 1.455 */
{ 70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d }, /* 1.463 */
{ 71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 }, /* 1.463 */
{ 72, 512, 0x42763a680d5bed8e, 0x000005084275c680 }, /* 1.452 */
{ 73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab }, /* 1.498 */
{ 74, 512, 0x9fa08548b1621a44, 0x0000054708019247 }, /* 1.526 */
{ 75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 }, /* 1.491 */
{ 76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 }, /* 1.470 */
{ 77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 }, /* 1.527 */
{ 78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 }, /* 1.509 */
{ 79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e }, /* 1.569 */
{ 80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c }, /* 1.555 */
{ 81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 }, /* 1.509 */
{ 82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 }, /* 1.596 */
{ 83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e }, /* 1.568 */
{ 84, 512, 0xba02545069ddc6dc, 0x000006d19861364f }, /* 1.541 */
{ 85, 512, 0x447c73192c35073e, 0x000006fce315ce35 }, /* 1.623 */
{ 86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b }, /* 1.620 */
{ 87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 }, /* 1.597 */
{ 88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b }, /* 1.575 */
{ 89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc }, /* 1.627 */
{ 90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb }, /* 1.596 */
{ 91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 }, /* 1.622 */
{ 92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e }, /* 1.695 */
{ 93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c }, /* 1.605 */
{ 94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc }, /* 1.625 */
{ 95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 }, /* 1.687 */
{ 96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a }, /* 1.621 */
{ 97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 }, /* 1.699 */
{ 98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b }, /* 1.688 */
{ 99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce }, /* 1.642 */
{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc }, /* 1.683 */
{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 }, /* 1.755 */
{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 }, /* 1.692 */
{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 }, /* 1.747 */
{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 }, /* 1.751 */
{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 }, /* 1.751 */
{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f }, /* 1.726 */
{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d }, /* 1.788 */
{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 }, /* 1.740 */
{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 }, /* 1.780 */
{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 }, /* 1.836 */
{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 }, /* 1.778 */
{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 }, /* 1.831 */
{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df }, /* 1.825 */
{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 }, /* 1.826 */
{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 }, /* 1.843 */
{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d }, /* 1.826 */
{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b }, /* 1.803 */
{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 }, /* 1.857 */
{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 }, /* 1.877 */
{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 }, /* 1.849 */
{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d }, /* 1.867 */
{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 }, /* 1.978 */
{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d }, /* 1.947 */
{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea }, /* 1.865 */
{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f }, /* 1.881 */
{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b }, /* 1.882 */
{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e }, /* 1.867 */
{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e }, /* 1.972 */
{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 }, /* 1.896 */
{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d }, /* 1.965 */
{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 }, /* 1.963 */
{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 }, /* 1.925 */
{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 }, /* 1.862 */
{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 }, /* 2.042 */
{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 }, /* 1.935 */
{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 }, /* 2.005 */
{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c }, /* 2.041 */
{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 }, /* 1.997 */
{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 }, /* 1.996 */
{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d }, /* 2.053 */
{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a }, /* 1.971 */
{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 }, /* 2.018 */
{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd }, /* 1.961 */
{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 }, /* 2.046 */
{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb }, /* 1.968 */
{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 }, /* 2.143 */
{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 }, /* 2.064 */
{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 }, /* 2.023 */
{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c }, /* 2.136 */
{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 }, /* 2.063 */
{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 }, /* 1.974 */
{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 }, /* 2.210 */
{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a }, /* 2.006 */
{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 }, /* 2.193 */
{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 }, /* 2.163 */
{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc }, /* 2.046 */
{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 }, /* 2.084 */
{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 }, /* 2.264 */
{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 }, /* 2.074 */
{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 }, /* 2.282 */
{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf }, /* 2.148 */
{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 }, /* 2.355 */
{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 }, /* 2.164 */
{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a }, /* 2.393 */
{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 }, /* 2.178 */
{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc }, /* 2.334 */
{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b }, /* 2.266 */
{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 }, /* 2.304 */
{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d }, /* 2.218 */
{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff }, /* 2.377 */
{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 }, /* 2.155 */
{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 }, /* 2.404 */
{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 }, /* 2.205 */
{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d }, /* 2.359 */
{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 }, /* 2.158 */
{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b }, /* 2.614 */
{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc }, /* 2.239 */
{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc }, /* 2.493 */
{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c }, /* 2.327 */
{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 }, /* 2.231 */
{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c }, /* 2.237 */
{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 }, /* 2.691 */
{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e }, /* 2.170 */
{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 }, /* 2.600 */
{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc }, /* 2.391 */
{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 }, /* 2.677 */
{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c }, /* 2.410 */
{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 }, /* 2.776 */
{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 }, /* 2.266 */
{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 }, /* 2.717 */
{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c }, /* 2.474 */
{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 }, /* 2.673 */
{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 }, /* 2.420 */
{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 }, /* 2.898 */
{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c }, /* 2.363 */
{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e }, /* 2.747 */
{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 }, /* 2.531 */
{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 }, /* 2.707 */
{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 }, /* 2.315 */
{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf }, /* 3.012 */
{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 }, /* 2.378 */
{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 }, /* 2.969 */
{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d }, /* 2.594 */
{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd }, /* 2.763 */
{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 }, /* 2.457 */
{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 }, /* 3.057 */
{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 }, /* 2.590 */
{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b }, /* 3.047 */
{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 }, /* 2.676 */
{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 }, /* 2.993 */
{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 }, /* 2.457 */
{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 }, /* 3.182 */
{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 }, /* 2.563 */
{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 }, /* 3.025 */
{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f }, /* 2.730 */
{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 }, /* 3.036 */
{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 }, /* 2.722 */
{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 }, /* 3.356 */
{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 }, /* 2.697 */
{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 }, /* 2.979 */
{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 }, /* 2.858 */
{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e }, /* 3.258 */
{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 }, /* 2.693 */
{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 }, /* 3.259 */
{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c }, /* 2.733 */
{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 }, /* 3.235 */
{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 }, /* 2.983 */
{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e }, /* 3.308 */
{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 }, /* 2.715 */
{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f }, /* 3.540 */
{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 }, /* 2.779 */
{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c }, /* 3.084 */
{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc }, /* 2.987 */
{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae }, /* 3.341 */
{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 }, /* 2.793 */
{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 }, /* 3.518 */
{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 }, /* 2.962 */
{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 }, /* 3.196 */
{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 }, /* 2.914 */
{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 }, /* 3.408 */
{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 }, /* 2.903 */
{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 }, /* 3.778 */
{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c }, /* 3.026 */
{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d }, /* 3.347 */
{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d }, /* 3.212 */
{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 }, /* 3.482 */
{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 }, /* 3.146 */
{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f }, /* 3.626 */
{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 }, /* 2.952 */
{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e }, /* 3.463 */
{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 }, /* 3.131 */
{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c }, /* 3.538 */
{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac }, /* 2.974 */
{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 }, /* 3.843 */
{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 }, /* 3.088 */
};
/*
* Verify the map is valid. Each device index must appear exactly
* once in every row, and the permutation array checksum must match.
*/
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
uint64_t checksum)
{
int countssz = sizeof (uint16_t) * children;
uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);
for (int i = 0; i < nperms; i++) {
for (int j = 0; j < children; j++) {
uint8_t val = perms[(i * children) + j];
if (val >= children || counts[val] != i) {
kmem_free(counts, countssz);
return (EINVAL);
}
counts[val]++;
}
}
if (checksum != 0) {
int permssz = sizeof (uint8_t) * children * nperms;
zio_cksum_t cksum;
fletcher_4_native_varsize(perms, permssz, &cksum);
if (checksum != cksum.zc_word[0]) {
kmem_free(counts, countssz);
return (ECKSUM);
}
}
kmem_free(counts, countssz);
return (0);
}
/*
* Generate the permutation array for the draid_map_t. These maps control
* the placement of all data in a dRAID. Therefore it's critical that the
* seed always generates the same mapping. We provide our own pseudo-random
* number generator for this purpose.
*/
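/*
 * Worked example (illustrative; values taken from the fixed table above,
 * not from this change): each draid_maps[] entry stores only
 * { children, nperms, seed, checksum }, e.g. 512 permutations for 255
 * children. The full children * nperms byte permutation table is
 * regenerated on demand from the seed and then validated against the
 * stored checksum by verify_perms(), so identical layouts are always
 * reproduced.
 */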
int
vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
{
VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
VERIFY3U(map->dm_seed, !=, 0);
VERIFY3U(map->dm_nperms, !=, 0);
VERIFY3P(map->dm_perms, ==, NULL);
#ifdef _KERNEL
/*
* The kernel code always provides both a map_seed and checksum.
* Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
* a zero checksum when generating new candidate maps.
*/
VERIFY3U(map->dm_checksum, !=, 0);
#endif
uint64_t children = map->dm_children;
uint64_t nperms = map->dm_nperms;
int rowsz = sizeof (uint8_t) * children;
int permssz = rowsz * nperms;
uint8_t *perms;
/* Allocate the permutation array */
perms = vmem_alloc(permssz, KM_SLEEP);
/* Setup an initial row with a known pattern */
uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
for (int i = 0; i < children; i++)
initial_row[i] = i;
uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
uint8_t *current_row, *previous_row = initial_row;
/*
* Perform a Fisher-Yates shuffle of each row using the previous
 * row as the starting point. An initial_row with a known pattern
* is used as the input for the first row.
*/
for (int i = 0; i < nperms; i++) {
current_row = &perms[i * children];
memcpy(current_row, previous_row, rowsz);
for (int j = children - 1; j > 0; j--) {
uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
uint8_t val = current_row[j];
current_row[j] = current_row[k];
current_row[k] = val;
}
previous_row = current_row;
}
kmem_free(initial_row, rowsz);
int error = verify_perms(perms, children, nperms, map->dm_checksum);
if (error) {
vmem_free(perms, permssz);
return (error);
}
*permsp = perms;
return (0);
}
/*
* Lookup the fixed draid_map_t for the requested number of children.
*/
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
if (draid_maps[i].dm_children == children) {
*mapp = &draid_maps[i];
return (0);
}
}
return (ENOENT);
}
/*
* Lookup the permutation array and iteration id for the provided offset.
*/
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
uint8_t **base, uint64_t *iter)
{
uint64_t ncols = vdc->vdc_children;
uint64_t poff = pindex % (vdc->vdc_nperms * ncols);
*base = vdc->vdc_perms + (poff / ncols) * ncols;
*iter = poff % ncols;
}
static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
uint8_t *base, uint64_t iter, uint64_t index)
{
return ((base[index] + iter) % vdc->vdc_children);
}
/*
* Return the asize which is the psize rounded up to a full group width.
* i.e. vdev_draid_psize_to_asize().
*/
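/*
 * Worked example (illustrative values, not taken from this change): with
 * ashift=12 (4KB sectors), ndata=8 and nparity=2 (groupwidth=10), a 32KB
 * psize requires rows = ((32768 - 1) / (8 << 12)) + 1 = 1 row, so
 * asize = (1 * 10) << 12 = 40KB, i.e. the write is padded out to a full
 * 10-sector stripe width.
 */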
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
(void) txg;
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_ashift;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
ASSERT3U(asize, !=, 0);
ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);
return (asize);
}
/*
 * Deflate the asize to the psize; this includes stripping parity.
*/
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT0(asize % vdc->vdc_groupwidth);
return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}
/*
* Convert a logical offset to the corresponding group number.
*/
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (offset / vdc->vdc_groupsz);
}
/*
* Convert a group number to the logical starting offset for that group.
*/
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (group * vdc->vdc_groupsz);
}
/*
* Full stripe writes. When writing, all columns (D+P) are required. Parity
* is calculated over all the columns, including empty zero filled sectors,
* and each is written to disk. While only the data columns are needed for
* a normal read, all of the columns are required for reconstruction when
* performing a sequential resilver.
*
* For "big columns" it's sufficient to map the correct range of the zio ABD.
* Partial columns require allocating a gang ABD in order to zero fill the
* empty sectors. When the column is empty a zero filled sector must be
* mapped. In all cases the data ABDs must be the same size as the parity
* ABDs (e.g. rc->rc_size == parity_size).
*/
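/*
 * Illustrative example (assumed geometry, not part of this change): for an
 * 8d+2p group and a write covering only 3 data sectors, 3 data columns map
 * one sector each from the zio ABD, the remaining 5 data columns are backed
 * by zero-filled skip sectors, and both parity columns are computed over
 * all 8 data columns, so the full 10-sector stripe width is written.
 */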
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t abd_off = abd_offset;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small write), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
rc->rc_abd = abd_get_zeros(skip_size);
} else if (rc->rc_size == parity_size) {
/* this is a "big column" */
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
} else {
/* short data column, add a skip sector */
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd_get_offset_size(
zio->io_abd, abd_off, rc->rc_size), B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
B_TRUE);
}
ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);
abd_off += rc->rc_size;
rc->rc_size = parity_size;
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
/*
* Scrub/resilver reads. In order to store the contents of the skip sectors
* an additional ABD is allocated. The columns are handled in the same way
* as a full stripe write except instead of using the zero ABD the newly
* allocated skip ABD is used to back the skip sectors. In all cases the
* data ABD must be the same size as the parity ABDs.
*/
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t abd_off = abd_offset;
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, ==, NULL);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
B_FALSE);
}
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
} else if (rc->rc_size == parity_size) {
/* this is a "big column" */
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
} else {
/* short data column, add a skip sector */
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd_get_offset_size(
zio->io_abd, abd_off, rc->rc_size), B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_offset_size(
rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
skip_off += skip_size;
}
uint64_t abd_size = abd_get_size(rc->rc_abd);
ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
/*
* Increase rc_size so the skip ABD is included in subsequent
* parity calculations.
*/
abd_off += rc->rc_size;
rc->rc_size = abd_size;
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}
/*
* Normal reads. In this common case only the columns containing data
 * are read into the zio ABDs. Neither the parity columns nor the empty skip
 * sectors are read unless the checksum fails verification, in which case
 * vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to expand
* the raid map in order to allow reconstruction using the parity data and
* skip sectors.
*/
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t abd_off = abd_offset;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size > 0) {
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
abd_off += rc->rc_size;
}
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
/*
* Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
* difference is that an ABD is allocated to back skip sectors so they may
 * be read into memory, verified, and repaired if needed.
*/
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, ==, NULL);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
B_FALSE);
}
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
ASSERT3P(rc->rc_abd, ==, NULL);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
} else if (rc->rc_size == parity_size) {
/* this is a "big column", nothing to add */
ASSERT3P(rc->rc_abd, !=, NULL);
} else {
/*
* short data column, add a skip sector and clear
* rc_tried to force the entire column to be re-read
* thereby including the missing skip sector data
* which is needed for reconstruction.
*/
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
ASSERT3P(rc->rc_abd, !=, NULL);
ASSERT(!abd_is_gang(rc->rc_abd));
abd_t *read_abd = rc->rc_abd;
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_offset_size(
rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
skip_off += skip_size;
rc->rc_tried = 0;
}
/*
* Increase rc_size so the empty ABD is included in subsequent
* parity calculations.
*/
rc->rc_size = parity_size;
}
ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}
/*
* Verify that all empty sectors are zero filled before using them to
* calculate parity. Otherwise, silent corruption in an empty sector will
* result in bad parity being generated. That bad parity will then be
* considered authoritative and overwrite the good parity on disk. This
* is possible because the checksum is only calculated over the data,
* thus it cannot be used to detect damage in empty sectors.
*/
int
vdev_draid_map_verify_empty(zio_t *zio, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t skip_off = parity_size - skip_size;
uint64_t empty_off = 0;
int ret = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, !=, NULL);
ASSERT3U(rr->rr_bigcols, >, 0);
void *zero_buf = kmem_zalloc(skip_size, KM_SLEEP);
for (int c = rr->rr_bigcols; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
ASSERT3P(rc->rc_abd, !=, NULL);
ASSERT3U(rc->rc_size, ==, parity_size);
if (abd_cmp_buf_off(rc->rc_abd, zero_buf, skip_off,
skip_size) != 0) {
vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
abd_zero_off(rc->rc_abd, skip_off, skip_size);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
empty_off += skip_size;
}
ASSERT3U(empty_off, ==, abd_get_size(rr->rr_abd_empty));
kmem_free(zero_buf, skip_size);
return (ret);
}
/*
* Given a logical address within a dRAID configuration, return the physical
* address on the first drive in the group that this address maps to
* (at position 'start' in permutation number 'perm').
*/
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
uint64_t *perm, uint64_t *start)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
/* b is the dRAID (parent) sector offset. */
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t b_offset = logical_offset >> ashift;
/*
* The height of a row in units of the vdev's minimum sector size.
* This is the amount of data written to each disk of each group
* in a given permutation.
*/
uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;
/*
* We cycle through a disk permutation every groupsz * ngroups chunk
* of address space. Note that ngroups * groupsz must be a multiple
* of the number of data drives (ndisks) in order to guarantee
* alignment. So, for example, if our row height is 16MB, our group
* size is 10, and there are 13 data drives in the draid, then ngroups
 * will be 13; we will change permutation every 2.08GB, and each
* disk will have 160MB of data per chunk.
*/
uint64_t groupwidth = vdc->vdc_groupwidth;
uint64_t ngroups = vdc->vdc_ngroups;
uint64_t ndisks = vdc->vdc_ndisks;
/*
* groupstart is where the group this IO will land in "starts" in
* the permutation array.
*/
uint64_t group = logical_offset / vdc->vdc_groupsz;
uint64_t groupstart = (group * groupwidth) % ndisks;
ASSERT3U(groupstart + groupwidth, <=, ndisks + groupstart);
*start = groupstart;
/* b_offset is the sector offset within a group chunk */
b_offset = b_offset % (rowheight_sectors * groupwidth);
ASSERT0(b_offset % groupwidth);
/*
* Find the starting byte offset on each child vdev:
* - within a permutation there are ngroups groups spread over the
* rows, where each row covers a slice portion of the disk
* - each permutation has (groupwidth * ngroups) / ndisks rows
* - so each permutation covers rows * slice portion of the disk
* - so we need to find the row where this IO group target begins
*/
*perm = group / ngroups;
uint64_t row = (*perm * ((groupwidth * ngroups) / ndisks)) +
(((group % ngroups) * groupwidth) / ndisks);
return (((rowheight_sectors * row) +
(b_offset / groupwidth)) << ashift);
}
static uint64_t
vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
uint64_t abd_offset, uint64_t abd_size)
{
vdev_t *vd = zio->io_vd;
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t io_size = abd_size;
uint64_t io_asize = vdev_draid_asize(vd, io_size, 0);
uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);
/*
* Limit the io_size to the space remaining in the group. A second
* row in the raidz_map_t is created for the remainder.
*/
if (io_offset + io_asize > start_offset) {
io_size = vdev_draid_asize_to_psize(vd,
start_offset - io_offset);
}
/*
* At most a block may span the logical end of one group and the start
* of the next group. Therefore, at the end of a group the io_size must
* span the group width evenly and the remainder must be aligned to the
* start of the next group.
*/
IMPLY(abd_offset == 0 && io_size < zio->io_size,
(io_asize >> ashift) % vdc->vdc_groupwidth == 0);
IMPLY(abd_offset != 0,
vdev_draid_group_to_offset(vd, group) == io_offset);
/* Lookup starting byte offset on each child vdev */
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
io_offset, &perm, &groupstart);
/*
 * If there are fewer than groupwidth drives available after the group
* start, the group is going to wrap onto the next row. 'wrap' is the
* group disk number that starts on the next row.
*/
uint64_t ndisks = vdc->vdc_ndisks;
uint64_t groupwidth = vdc->vdc_groupwidth;
uint64_t wrap = groupwidth;
if (groupstart + groupwidth > ndisks)
wrap = ndisks - groupstart;
/* The io size in units of the vdev's minimum sector size. */
const uint64_t psize = io_size >> ashift;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
uint64_t q = psize / vdc->vdc_ndata;
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
uint64_t r = psize - q * vdc->vdc_ndata;
/* The number of "big columns" - those which contain remainder data. */
uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
ASSERT3U(bc, <, groupwidth);
/* The total number of data and parity sectors for this I/O. */
uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
ASSERT3U(vdc->vdc_nparity, >, 0);
raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth, zio);
rr->rr_bigcols = bc;
rr->rr_firstdatacol = vdc->vdc_nparity;
#ifdef ZFS_DEBUG
rr->rr_offset = io_offset;
rr->rr_size = io_size;
#endif
*rrp = rr;
uint8_t *base;
uint64_t iter, asize = 0;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < groupwidth; i++) {
raidz_col_t *rc = &rr->rr_col[i];
uint64_t c = (groupstart + i) % ndisks;
/* increment the offset if we wrap to the next row */
if (i == wrap)
physical_offset += VDEV_DRAID_ROWHEIGHT;
rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
rc->rc_offset = physical_offset;
if (q == 0 && i >= bc)
rc->rc_size = 0;
else if (i < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rr->rr_nempty = roundup(tot, groupwidth) - tot;
IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);
/* Allocate buffers for the parity columns */
for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
/*
* Map buffers for data columns and allocate/map buffers for skip
* sectors. There are three distinct cases for dRAID which are
* required to support sequential rebuild.
*/
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_draid_map_alloc_write(zio, abd_offset, rr);
} else if ((rr->rr_nempty > 0) &&
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
} else {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
vdev_draid_map_alloc_read(zio, abd_offset, rr);
}
return (io_size);
}
/*
* Allocate the raidz mapping to be applied to the dRAID I/O. The parity
 * calculations for dRAID are identical to raidz; however, there are a few
* differences in the layout.
*
 * - dRAID always allocates a full stripe width. Any extra sectors due to
* this padding are zero filled and written to disk. They will be read
* back during a scrub or repair operation since they are included in
* the parity calculation. This property enables sequential resilvering.
*
* - When the block at the logical offset spans redundancy groups then two
* rows are allocated in the raidz_map_t. One row resides at the end of
* the first group and the other at the start of the following group.
*/
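/*
 * Illustrative example (using the 16MB row height and 10-wide group from
 * the comment in vdev_draid_logical_to_physical()): each group then spans
 * 160MB of allocatable address space, and a block whose asize extends past
 * a 160MB group boundary is split into two raidz_row_t entries, the first
 * covering the tail of the current group and the second starting at the
 * first offset of the following group.
 */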
static raidz_map_t *
vdev_draid_map_alloc(zio_t *zio)
{
raidz_row_t *rr[2];
uint64_t abd_offset = 0;
uint64_t abd_size = zio->io_size;
uint64_t io_offset = zio->io_offset;
uint64_t size;
int nrows = 1;
size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
abd_offset, abd_size);
if (size < abd_size) {
vdev_t *vd = zio->io_vd;
io_offset += vdev_draid_asize(vd, size, 0);
abd_offset += size;
abd_size -= size;
nrows++;
ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
vd, vdev_draid_offset_to_group(vd, io_offset)));
ASSERT3U(abd_offset, <, zio->io_size);
ASSERT3U(abd_size, !=, 0);
size = vdev_draid_map_alloc_row(zio, &rr[1],
io_offset, abd_offset, abd_size);
VERIFY3U(size, ==, abd_size);
}
raidz_map_t *rm;
rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
rm->rm_ops = vdev_raidz_math_get_ops();
rm->rm_nrows = nrows;
rm->rm_row[0] = rr[0];
if (nrows == 2)
rm->rm_row[1] = rr[1];
return (rm);
}
/*
* Given an offset into a dRAID return the next group width aligned offset
* which can be used to start an allocation.
*/
static uint64_t
vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
}
/*
* Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
* rounded down to the last full slice. So each child must provide at least
* 1 / (children - nspares) of its asize.
*/
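/*
 * Illustrative example (assumed geometry): for a dRAID built from 13
 * children with 2 distributed spares, ndisks = 11, so each child must
 * provide at least ceil(min_asize / 11) of the top-level minimum asize
 * plus the 32MB reflow reserve described in vdev_draid_open().
 */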
static uint64_t
vdev_draid_min_asize(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (VDEV_DRAID_REFLOW_RESERVE +
(vd->vdev_min_asize + vdc->vdc_ndisks - 1) / (vdc->vdc_ndisks));
}
/*
* When using dRAID the minimum allocation size is determined by the number
* of data disks in the redundancy group. Full stripes are always used.
*/
static uint64_t
vdev_draid_min_alloc(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (vdc->vdc_ndata << vd->vdev_ashift);
}
/*
* Returns true if the txg range does not exist on any leaf vdev.
*
* A dRAID spare does not fit into the DTL model. While it has child vdevs
* there is no redundancy among them, and the effective child vdev is
* determined by offset. Essentially we do a vdev_dtl_reassess() on the
* fly by replacing a dRAID spare with the child vdev under the offset.
* Note that it is a recursive process because the child vdev can be
* another dRAID spare and so on.
*/
boolean_t
vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
uint64_t size)
{
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
/*
 * Check all of the readable children; if any child
 * contains the txg range then the data is not missing.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (!vdev_draid_missing(cvd, physical_offset,
txg, size))
return (B_FALSE);
}
return (B_TRUE);
}
if (vd->vdev_ops == &vdev_draid_spare_ops) {
/*
* When sequentially resilvering we don't have a proper
* txg range so instead we must presume all txgs are
* missing on this vdev until the resilver completes.
*/
if (vd->vdev_rebuild_txg != 0)
return (B_TRUE);
/*
* DTL_MISSING is set for all prior txgs when a resilver
* is started in spa_vdev_attach().
*/
if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
return (B_TRUE);
/*
* Consult the DTL on the relevant vdev. Either a vdev
 * leaf or spare/replace mirror child may be returned, so
 * we must recursively call vdev_draid_missing().
*/
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_TRUE);
return (vdev_draid_missing(vd, physical_offset,
txg, size));
}
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Returns true if the txg is only partially replicated on the leaf vdevs.
*/
static boolean_t
vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
uint64_t size)
{
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
/*
 * Check all of the readable children; if any child is
* missing the txg range then it is partially replicated.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (vdev_draid_partial(cvd, physical_offset, txg, size))
return (B_TRUE);
}
return (B_FALSE);
}
if (vd->vdev_ops == &vdev_draid_spare_ops) {
/*
* When sequentially resilvering we don't have a proper
* txg range so instead we must presume all txgs are
* missing on this vdev until the resilver completes.
*/
if (vd->vdev_rebuild_txg != 0)
return (B_TRUE);
/*
* DTL_MISSING is set for all prior txgs when a resilver
* is started in spa_vdev_attach().
*/
if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
return (B_TRUE);
/*
* Consult the DTL on the relevant vdev. Either a vdev
 * leaf or spare/replace mirror child may be returned, so
 * we must recursively call vdev_draid_partial().
*/
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_TRUE);
return (vdev_draid_partial(vd, physical_offset, txg, size));
}
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Determine if the vdev is readable at the given offset.
*/
boolean_t
vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
{
if (vd->vdev_ops == &vdev_draid_spare_ops) {
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_FALSE);
}
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (vdev_draid_readable(cvd, physical_offset))
return (B_TRUE);
}
return (B_FALSE);
}
return (vdev_readable(vd));
}
/*
* Returns the first distributed spare found under the provided vdev tree.
*/
static vdev_t *
vdev_draid_find_spare(vdev_t *vd)
{
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
if (svd != NULL)
return (svd);
}
return (NULL);
}
/*
* Returns B_TRUE if the passed in vdev is currently "faulted".
* Faulted, in this context, means that the vdev represents a
* replacing or sparing vdev tree.
*/
static boolean_t
vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
{
if (vd->vdev_ops == &vdev_draid_spare_ops) {
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_FALSE);
/*
 * After resolving the distributed spare to a leaf vdev,
* check the parent to determine if it's "faulted".
*/
vd = vd->vdev_parent;
}
return (vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
}
/*
* Determine if the dRAID block at the logical offset is degraded.
* Used by sequential resilver.
*/
static boolean_t
vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
offset, &perm, &groupstart);
uint8_t *base;
uint64_t iter;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
vdev_t *cvd = vd->vdev_child[cid];
/* Group contains a faulted vdev. */
if (vdev_draid_faulted(cvd, physical_offset))
return (B_TRUE);
/*
* Always check groups with active distributed spares
* because any vdev failure in the pool will affect them.
*/
if (vdev_draid_find_spare(cvd) != NULL)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Determine if the txg is missing. Used by healing resilver.
*/
static boolean_t
vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
uint64_t size)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
offset, &perm, &groupstart);
uint8_t *base;
uint64_t iter;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
vdev_t *cvd = vd->vdev_child[cid];
/* Transaction group is known to be partially replicated. */
if (vdev_draid_partial(cvd, physical_offset, txg, size))
return (B_TRUE);
/*
* Always check groups with active distributed spares
* because any vdev failure in the pool will affect them.
*/
if (vdev_draid_find_spare(cvd) != NULL)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Find the smallest child asize and largest sector size to calculate the
* available capacity. Distributed spares are ignored since their capacity
 * is also based on the minimum child size in the top-level dRAID.
*/
static void
vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
{
uint64_t logical_ashift = 0, physical_ashift = 0;
uint64_t asize = 0, max_asize = 0;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
continue;
asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
}
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
continue;
physical_ashift = vdev_best_ashift(logical_ashift,
physical_ashift, cvd->vdev_physical_ashift);
}
*asizep = asize;
*max_asizep = max_asize;
*logical_ashiftp = logical_ashift;
*physical_ashiftp = physical_ashift;
}
/*
* Open spare vdevs.
*/
static boolean_t
vdev_draid_open_spares(vdev_t *vd)
{
return (vd->vdev_ops == &vdev_draid_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
}
/*
* Open all children, excluding spares.
*/
static boolean_t
vdev_draid_open_children(vdev_t *vd)
{
return (!vdev_draid_open_spares(vd));
}
/*
* Open a top-level dRAID vdev.
*/
static int
vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t nparity = vdc->vdc_nparity;
int open_errors = 0;
if (nparity > VDEV_DRAID_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* First open the normal children then the distributed spares. This
* ordering is important to ensure the distributed spares calculate
* the correct psize in the event that the dRAID vdevs were expanded.
*/
vdev_open_children_subset(vd, vdev_draid_open_children);
vdev_open_children_subset(vd, vdev_draid_open_spares);
/* Verify enough of the children are available to continue. */
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_open_error != 0) {
if ((++open_errors) > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (SET_ERROR(ENXIO));
}
}
}
/*
* Allocatable capacity is the sum of the space on all children less
 * the number of distributed spares rounded down to the last full row
* and then to the last full group. An additional 32MB of scratch
* space is reserved at the end of each child for use by the dRAID
* expansion feature.
*/
uint64_t child_asize, child_max_asize;
vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
logical_ashift, physical_ashift);
/*
* Should be unreachable since the minimum child size is 64MB, but
* we want to make sure an underflow absolutely cannot occur here.
*/
if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
return (SET_ERROR(ENXIO));
}
child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
vdc->vdc_groupsz);
*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
vdc->vdc_groupsz);
return (0);
}
/*
* Close a top-level dRAID vdev.
*/
static void
vdev_draid_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
/*
* Return the maximum asize for a rebuild zio in the provided range
 * given the following constraints. A dRAID chunk may not:
*
* - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
* - Span dRAID redundancy groups.
*/
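/*
 * Worked example (illustrative values): with ashift=12, ndata=8 and a 1MB
 * max_segment, psize is rounded up to 8MB and capped at SPA_MAXBLOCKSIZE;
 * after trimming psize to a whole multiple of ndata sectors the chunk is
 * further limited to both the caller's asize and the space remaining in
 * the current redundancy group.
 */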
static uint64_t
vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t max_segment)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t ashift = vd->vdev_ashift;
uint64_t ndata = vdc->vdc_ndata;
uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
SPA_MAXBLOCKSIZE);
ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);
/* Chunks must evenly span all data columns in the group. */
psize = (((psize >> ashift) / ndata) * ndata) << ashift;
uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));
/* Reduce the chunk size to the group space remaining. */
uint64_t group = vdev_draid_offset_to_group(vd, start);
uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
chunk_size = MIN(chunk_size, left);
ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
vdev_draid_offset_to_group(vd, start + chunk_size - 1));
return (chunk_size);
}
/*
* Align the start of the metaslab to the group width and slightly reduce
* its size to a multiple of the group width. Since full stripe writes are
 * required by dRAID, this space is unallocable. Furthermore, aligning the
* metaslab start is important for vdev initialize and TRIM which both operate
* on metaslab boundaries which vdev_xlate() expects to be aligned.
*/
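/*
 * Worked example (illustrative values): with ashift=12 and groupwidth=10
 * the alignment unit sz is 40KB; *ms_start is rounded up to the next 40KB
 * boundary and *ms_size is trimmed down so that it remains a whole
 * multiple of 40KB.
 */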
static void
vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;
*ms_start = astart;
*ms_size = asize;
ASSERT0(*ms_start % sz);
ASSERT0(*ms_size % sz);
}
/*
* Add virtual dRAID spares to the list of valid spares. In order to accomplish
* this the existing array must be freed and reallocated with the additional
* entries.
*/
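/*
 * Illustrative note: the generated spare names follow the
 * "draid<nparity>-<top-level vdev id>-<spare id>" pattern built by the
 * snprintf() below, e.g. "draid2-0-1" for the second distributed spare of
 * the first draid2 top-level vdev (example name only).
 */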
int
vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
uint64_t next_vdev_id)
{
uint64_t draid_nspares = 0;
uint64_t ndraid = 0;
int error;
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_t *cvd = vd->vdev_child[i];
if (cvd->vdev_ops == &vdev_draid_ops) {
vdev_draid_config_t *vdc = cvd->vdev_tsd;
draid_nspares += vdc->vdc_nspares;
ndraid++;
}
}
if (draid_nspares == 0) {
*ndraidp = ndraid;
return (0);
}
nvlist_t **old_spares, **new_spares;
uint_t old_nspares;
error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&old_spares, &old_nspares);
if (error)
old_nspares = 0;
/* Allocate memory and copy the existing spares. */
new_spares = kmem_alloc(sizeof (nvlist_t *) *
(draid_nspares + old_nspares), KM_SLEEP);
for (uint_t i = 0; i < old_nspares; i++)
new_spares[i] = fnvlist_dup(old_spares[i]);
/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
uint64_t n = old_nspares;
for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
vdev_t *cvd = vd->vdev_child[vdev_id];
char path[64];
if (cvd->vdev_ops != &vdev_draid_ops)
continue;
vdev_draid_config_t *vdc = cvd->vdev_tsd;
uint64_t nspares = vdc->vdc_nspares;
uint64_t nparity = vdc->vdc_nparity;
for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
memset(path, 0, sizeof (path));
(void) snprintf(path, sizeof (path) - 1,
"%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
(u_longlong_t)nparity,
(u_longlong_t)next_vdev_id + vdev_id,
(u_longlong_t)spare_id);
nvlist_t *spare = fnvlist_alloc();
fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_DRAID_SPARE);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
cvd->vdev_guid);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
spare_id);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
cvd->vdev_ashift);
new_spares[n] = spare;
n++;
}
}
if (n > 0) {
(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t **)new_spares, n);
}
for (int i = 0; i < n; i++)
nvlist_free(new_spares[i]);
kmem_free(new_spares, sizeof (*new_spares) * n);
*ndraidp = ndraid;
return (0);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered.
*/
static boolean_t
vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = vdev_draid_asize(vd, psize, 0);
if (phys_birth == TXG_UNKNOWN) {
/*
* Sequential resilver. There is no meaningful phys_birth
 * for this block; we can only determine whether the block resides
 * in a degraded group, in which case it must be resilvered.
*/
ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
vdev_draid_offset_to_group(vd, offset + asize - 1));
return (vdev_draid_group_degraded(vd, offset));
} else {
/*
* Healing resilver. TXGs not in DTL_PARTIAL are intact,
* as are blocks in non-degraded groups.
*/
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
return (B_TRUE);
/* The block may span groups in which case check both. */
if (vdev_draid_offset_to_group(vd, offset) !=
vdev_draid_offset_to_group(vd, offset + asize - 1)) {
if (vdev_draid_group_missing(vd,
offset + asize, phys_birth, 1))
return (B_TRUE);
}
return (B_FALSE);
}
}
static boolean_t
vdev_draid_rebuilding(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (vdev_draid_rebuilding(vd->vdev_child[i])) {
return (B_TRUE);
}
}
return (B_FALSE);
}
static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
- range_seg64_t logical_rs, physical_rs, remain_rs;
+ zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_draid_asize(vd, rr->rr_size, 0);
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
#endif
}
/*
* For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity. A gang ABD is allocated by vdev_draid_map_alloc()
* if a skip sector needs to be added to a column.
*/
static void
vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* Empty columns are zero filled and included in the parity
* calculation and therefore must be written.
*/
ASSERT3U(rc->rc_size, !=, 0);
/* Verify physical to logical translation */
vdev_draid_io_verify(vd, rr, c);
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx], rc->rc_offset,
rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
0, vdev_raidz_child_done, rc));
}
}
/*
* For read operations:
* 1. The vdev_draid_map_alloc() function will create a minimal raidz
* mapping for the read based on the zio->io_flags. There are two
 * possible mappings: either 1) a normal read, or 2) a scrub/resilver.
* 2. Create the zio read operations. This will include all parity
* columns and skip sectors for a scrub/resilver.
*/
static void
vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
/* Sequential rebuild must do IO at redundancy group boundary. */
IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);
/*
* Iterate over the columns in reverse order so that we hit the parity
* last. Any errors along the way will force us to read the parity.
* For scrub/resilver IOs which verify skip sectors, a gang ABD will
* have been allocated to store them and rc->rc_size is increased.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_draid_readable(cvd, rc->rc_offset)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1;
rc->rc_skipped = 1;
continue;
}
if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
/*
* Empty columns may be read during vdev_draid_io_done().
* Only skip them after the readable and missing checks
* verify they are available.
*/
if (rc->rc_size == 0) {
rc->rc_skipped = 1;
continue;
}
if (zio->io_flags & ZIO_FLAG_RESILVER) {
vdev_t *svd;
/*
* Sequential rebuilds need to always consider the data
* on the child being rebuilt to be stale. This is
* important when all columns are available to aid
 * known reconstruction in identifying which columns
* contain incorrect data.
*
* Furthermore, all repairs need to be constrained to
* the devices being rebuilt because without a checksum
* we cannot verify the data is actually correct and
* performing an incorrect repair could result in
* locking in damage and making the data unrecoverable.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
if (vdev_draid_rebuilding(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
rc->rc_allow_repair = 1;
continue;
} else {
rc->rc_allow_repair = 0;
}
} else {
rc->rc_allow_repair = 1;
}
/*
* If this child is a distributed spare then the
 * offset might reside on the vdev being replaced,
 * in which case this data must be written to the
* new device. Failure to do so would result in
* checksum errors when the old device is detached
* and the pool is scrubbed.
*/
if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
svd = vdev_draid_spare_get_child(svd,
rc->rc_offset);
if (svd && (svd->vdev_ops == &vdev_spare_ops ||
svd->vdev_ops == &vdev_replacing_ops)) {
rc->rc_force_repair = 1;
if (vdev_draid_rebuilding(svd))
rc->rc_allow_repair = 1;
}
}
/*
 * Always issue a repair IO to this child when it's
* a spare or replacing vdev with an active rebuild.
*/
if ((cvd->vdev_ops == &vdev_spare_ops ||
cvd->vdev_ops == &vdev_replacing_ops) &&
vdev_draid_rebuilding(cvd)) {
rc->rc_force_repair = 1;
rc->rc_allow_repair = 1;
}
}
}
/*
 * Either a parity or data column is missing; this means a repair
* may be attempted by vdev_draid_io_done(). Expand the raid map
* to read in empty columns which are needed along with the parity
* during reconstruction.
*/
if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
vdev_draid_map_alloc_empty(zio, rr);
}
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_error || rc->rc_size == 0)
continue;
if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
/*
* Start an IO operation to a dRAID vdev.
*/
static void
vdev_draid_io_start(zio_t *zio)
{
vdev_t *vd __maybe_unused = zio->io_vd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));
raidz_map_t *rm = vdev_draid_map_alloc(zio);
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_draid_io_start_write(zio, rm->rm_row[i]);
}
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_draid_io_start_read(zio, rm->rm_row[i]);
}
}
zio_execute(zio);
}
/*
* Complete an IO operation on a dRAID vdev. The raidz logic can be applied
* to dRAID since the layout is fully described by the raidz_map_t.
*/
static void
vdev_draid_io_done(zio_t *zio)
{
vdev_raidz_io_done(zio);
}
static void
vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT(vd->vdev_ops == &vdev_draid_ops);
if (faulted > vdc->vdc_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
static void
-vdev_draid_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs, range_seg64_t *remain_rs)
+vdev_draid_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
+ zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_draid_ops);
vdev_draid_config_t *vdc = raidvd->vdev_tsd;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* Make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t logical_start = logical_rs->rs_start;
uint64_t logical_end = logical_rs->rs_end;
/*
* Unaligned ranges must be skipped. All metaslabs are correctly
 * aligned, so this should not happen, but it is handled here in
 * case it's needed by future callers.
*/
uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
if (astart != logical_start) {
physical_rs->rs_start = logical_start;
physical_rs->rs_end = logical_start;
remain_rs->rs_start = MIN(astart, logical_end);
remain_rs->rs_end = logical_end;
return;
}
/*
* Unlike with mirrors and raidz a dRAID logical range can map
* to multiple non-contiguous physical ranges. This is handled by
* limiting the size of the logical range to a single group and
* setting the remain argument such that it describes the remaining
* unmapped logical range. This is stricter than absolutely
* necessary but helps simplify the logic below.
*/
uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
if (logical_end > nextstart)
logical_end = nextstart;
/* Find the starting offset for each vdev in the group */
uint64_t perm, groupstart;
uint64_t start = vdev_draid_logical_to_physical(raidvd,
logical_start, &perm, &groupstart);
uint64_t end = start;
uint8_t *base;
uint64_t iter, id;
vdev_draid_get_perm(vdc, perm, &base, &iter);
/*
 * Check if the passed child falls within the group. If it does,
* update the start and end to reflect the physical range.
* Otherwise, leave them unmodified which will result in an empty
* (zero-length) physical range being returned.
*/
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
if (c == 0 && i != 0) {
/* the group wrapped, increment the start */
start += VDEV_DRAID_ROWHEIGHT;
end = start;
}
id = vdev_draid_permute_id(vdc, base, iter, c);
if (id == cvd->vdev_id) {
uint64_t b_size = (logical_end >> ashift) -
(logical_start >> ashift);
ASSERT3U(b_size, >, 0);
end = start + ((((b_size - 1) /
vdc->vdc_groupwidth) + 1) << ashift);
break;
}
}
physical_rs->rs_start = start;
physical_rs->rs_end = end;
/*
* Only top-level vdevs are allowed to set remain_rs because
* when .vdev_op_xlate() is called for their children the full
* logical range is not provided by vdev_xlate().
*/
remain_rs->rs_start = logical_end;
remain_rs->rs_end = logical_rs->rs_end;
ASSERT3U(physical_rs->rs_start, <=, logical_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_end - logical_start);
}
/*
* Add dRAID specific fields to the config nvlist.
*/
static void
vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
vdev_draid_config_t *vdc = vd->vdev_tsd;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
}
/*
* Initialize private dRAID specific fields from the nvlist.
*/
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
(void) spa;
uint64_t ndata, nparity, nspares, ngroups;
int error;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
return (SET_ERROR(EINVAL));
}
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children == 0 ||
children > VDEV_DRAID_MAX_CHILDREN) {
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
nspares > 100 || nspares > (children - (ndata + nparity))) {
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
return (SET_ERROR(EINVAL));
}
/*
 * Validate that the minimum number of children exist per group for the
* specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
*/
if (children < (ndata + nparity + nspares))
return (SET_ERROR(EINVAL));
/*
* Create the dRAID configuration using the pool nvlist configuration
* and the fixed mapping for the correct number of children.
*/
vdev_draid_config_t *vdc;
const draid_map_t *map;
error = vdev_draid_lookup_map(children, &map);
if (error)
return (SET_ERROR(EINVAL));
vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
vdc->vdc_ndata = ndata;
vdc->vdc_nparity = nparity;
vdc->vdc_nspares = nspares;
vdc->vdc_children = children;
vdc->vdc_ngroups = ngroups;
vdc->vdc_nperms = map->dm_nperms;
error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
if (error) {
kmem_free(vdc, sizeof (*vdc));
return (SET_ERROR(EINVAL));
}
/*
* Derived constants.
*/
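/*
 * Illustrative example (matching the geometry used in the comment in
 * vdev_draid_logical_to_physical()): with groupwidth = 10 (ndata + nparity),
 * ndisks = 13 and ngroups = 13, groupsz = 10 * 16MB = 160MB and
 * devslicesz = (160MB * 13) / 13 = 160MB, so the permutation advances
 * every ngroups * groupsz = 2.08GB of allocatable address space.
 */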
vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
vdc->vdc_ndisks;
ASSERT3U(vdc->vdc_groupwidth, >=, 2);
ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
vdc->vdc_ndisks, ==, 0);
*tsd = vdc;
return (0);
}
static void
vdev_draid_fini(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
vdc->vdc_children * vdc->vdc_nperms);
kmem_free(vdc, sizeof (*vdc));
}
static uint64_t
vdev_draid_nparity(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
return (vdc->vdc_nparity);
}
static uint64_t
vdev_draid_ndisks(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
return (vdc->vdc_ndisks);
}
vdev_ops_t vdev_draid_ops = {
.vdev_op_init = vdev_draid_init,
.vdev_op_fini = vdev_draid_fini,
.vdev_op_open = vdev_draid_open,
.vdev_op_close = vdev_draid_close,
.vdev_op_asize = vdev_draid_asize,
.vdev_op_min_asize = vdev_draid_min_asize,
.vdev_op_min_alloc = vdev_draid_min_alloc,
.vdev_op_io_start = vdev_draid_io_start,
.vdev_op_io_done = vdev_draid_io_done,
.vdev_op_state_change = vdev_draid_state_change,
.vdev_op_need_resilver = vdev_draid_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_draid_xlate,
.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
.vdev_op_metaslab_init = vdev_draid_metaslab_init,
.vdev_op_config_generate = vdev_draid_config_generate,
.vdev_op_nparity = vdev_draid_nparity,
.vdev_op_ndisks = vdev_draid_ndisks,
.vdev_op_type = VDEV_TYPE_DRAID,
.vdev_op_leaf = B_FALSE,
};
/*
* A dRAID distributed spare is a virtual leaf vdev which is included in the
* parent dRAID configuration. The last N columns of the dRAID permutation
* table are used to determine on which dRAID children a specific offset
* should be written. These spare leaf vdevs can only be used to replace
* faulted children in the same dRAID configuration.
*/
/*
* Distributed spare state. All fields are set when the distributed spare is
* first opened and are immutable.
*/
typedef struct {
vdev_t *vds_draid_vdev; /* top-level parent dRAID vdev */
uint64_t vds_top_guid; /* top-level parent dRAID guid */
uint64_t vds_spare_id; /* spare id (0 - vdc->vdc_nspares-1) */
} vdev_draid_spare_t;
/*
* Returns the parent dRAID vdev to which the distributed spare belongs.
* This may be safely called even when the vdev is not open.
*/
vdev_t *
vdev_draid_spare_get_parent(vdev_t *vd)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
if (vds->vds_draid_vdev != NULL)
return (vds->vds_draid_vdev);
return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
vds->vds_top_guid));
}
/*
 * A dRAID spare is active when it's the child of a vdev using the
* vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
*/
static boolean_t
vdev_draid_spare_is_active(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_draid_ops)) {
return (B_TRUE);
} else {
return (B_FALSE);
}
}
/*
 * Given a dRAID distributed spare vdev, returns the physical child vdev
* on which the provided offset resides. This may involve recursing through
* multiple layers of distributed spares. Note that offset is relative to
* this vdev.
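 * The permutation row is selected by physical_offset / vdc_devslicesz, and
 * the spare occupies column (vdev_children - 1) - vds_spare_id of that row;
 * that is, the last vdc_nspares columns of the permutation are reserved for
 * distributed spares.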
*/
vdev_t *
vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
/* The vdev is closed */
if (vds->vds_draid_vdev == NULL)
return (NULL);
vdev_t *tvd = vds->vds_draid_vdev;
vdev_draid_config_t *vdc = tvd->vdev_tsd;
ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);
uint8_t *base;
uint64_t iter;
uint64_t perm = physical_offset / vdc->vdc_devslicesz;
vdev_draid_get_perm(vdc, perm, &base, &iter);
uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
(tvd->vdev_children - 1) - vds->vds_spare_id);
vdev_t *cvd = tvd->vdev_child[cid];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return (vdev_draid_spare_get_child(cvd, physical_offset));
return (cvd);
}
static void
vdev_draid_spare_close(vdev_t *vd)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
vds->vds_draid_vdev = NULL;
}
/*
* Opening a dRAID spare device is done by looking up the associated dRAID
* top-level vdev guid from the spare configuration.
*/
static int
vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
uint64_t asize, max_asize;
vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
if (tvd == NULL) {
/*
 * When spa_vdev_add() is labeling new spares, the
 * associated dRAID is not attached to the root vdev,
 * nor does this spare have a parent. Simulate a valid
* device in order to allow the label to be initialized
* and the distributed spare added to the configuration.
*/
if (vd->vdev_parent == NULL) {
*psize = *max_psize = SPA_MINDEVSIZE;
*logical_ashift = *physical_ashift = ASHIFT_MIN;
return (0);
}
return (SET_ERROR(EINVAL));
}
vdev_draid_config_t *vdc = tvd->vdev_tsd;
if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
return (SET_ERROR(EINVAL));
if (vds->vds_spare_id >= vdc->vdc_nspares)
return (SET_ERROR(EINVAL));
/*
 * Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
* because the caller may be vdev_draid_open() in which case the
* values are stale as they haven't yet been updated by vdev_open().
* To avoid this always recalculate the dRAID asize and max_asize.
*/
vdev_draid_calculate_asize(tvd, &asize, &max_asize,
logical_ashift, physical_ashift);
*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
vds->vds_draid_vdev = tvd;
return (0);
}
/*
* Completed distributed spare IO. Store the result in the parent zio
* as if it had performed the operation itself. Only the first error is
* preserved if there are multiple errors.
*/
static void
vdev_draid_spare_child_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
/*
* IOs are issued to non-writable vdevs in order to keep their
* DTLs accurate. However, we don't want to propagate the
 * error into the distributed spare's DTL. When resilvering
* vdev_draid_need_resilver() will consult the relevant DTL
* to determine if the data is missing and must be repaired.
*/
if (!vdev_writeable(zio->io_vd))
return;
if (pio->io_error == 0)
pio->io_error = zio->io_error;
}
/*
* Returns a valid label nvlist for the distributed spare vdev. This is
* used to bypass the IO pipeline to avoid the complexity of constructing
* a complete label with valid checksum to return when read.
*/
nvlist_t *
vdev_draid_read_config_spare(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
uint64_t guid = vd->vdev_guid;
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
vdev_draid_spare_is_active(vd) ?
POOL_STATE_ACTIVE : POOL_STATE_SPARE);
/* Set the vdev guid based on the vdev list in sav_count. */
for (int i = 0; i < sav->sav_count; i++) {
if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
guid = sav->sav_vdevs[i]->vdev_guid;
break;
}
}
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);
return (nv);
}
/*
* Handle any flush requested of the distributed spare. All children must be
* flushed.
*/
static int
vdev_draid_spare_flush(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
int error = 0;
for (int c = 0; c < vd->vdev_children; c++) {
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[c], zio->io_offset, zio->io_abd,
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
return (error);
}
/*
* Initiate an IO to the distributed spare. For normal IOs this entails using
* the zio->io_offset and permutation table to calculate which child dRAID vdev
 * is responsible for the data, then passing the zio along to that child to
 * perform the actual IO. The label ranges are not stored on disk and require
* some special handling which is described below.
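 * Offsets within the label ranges are not remapped: probe and
 * config-writer IOs are simply acknowledged, while label reads are
 * satisfied by the nvlist synthesized in vdev_draid_read_config_spare().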
*/
static void
vdev_draid_spare_io_start(zio_t *zio)
{
vdev_t *cvd = NULL, *vd = zio->io_vd;
vdev_draid_spare_t *vds = vd->vdev_tsd;
uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;
/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
* Nothing to be done here but return failure.
*/
if (vds == NULL) {
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
switch (zio->io_type) {
case ZIO_TYPE_FLUSH:
zio->io_error = vdev_draid_spare_flush(zio);
break;
case ZIO_TYPE_WRITE:
if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
/*
* Accept probe IOs and config writers to simulate the
* existence of an on disk label. vdev_label_sync(),
* vdev_uberblock_sync() and vdev_copy_uberblocks()
* skip the distributed spares. This only leaves
* vdev_label_init() which is allowed to succeed to
 * avoid adding special cases to the function.
*/
if (zio->io_flags & ZIO_FLAG_PROBE ||
zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
zio->io_error = 0;
} else {
zio->io_error = SET_ERROR(EIO);
}
} else {
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
}
break;
case ZIO_TYPE_READ:
if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
/*
* Accept probe IOs to simulate the existence of a
* label. vdev_label_read_config() bypasses the
* pipeline to read the label configuration and
* vdev_uberblock_load() skips distributed spares
* when attempting to locate the best uberblock.
*/
if (zio->io_flags & ZIO_FLAG_PROBE) {
zio->io_error = 0;
} else {
zio->io_error = SET_ERROR(EIO);
}
} else {
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL || !vdev_readable(cvd)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
}
break;
case ZIO_TYPE_TRIM:
/* The vdev label ranges are never trimmed */
ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL || !cvd->vdev_has_trim) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
zio_execute(zio);
}
static void
vdev_draid_spare_io_done(zio_t *zio)
{
(void) zio;
}
/*
* Lookup the full spare config in spa->spa_spares.sav_config and
* return the top_guid and spare_id for the named spare.
*/
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
uint64_t *spare_idp)
{
nvlist_t **spares;
uint_t nspares;
int error;
if ((spa->spa_spares.sav_config == NULL) ||
(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
return (SET_ERROR(ENOENT));
}
const char *spare_name;
error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
if (error != 0)
return (SET_ERROR(EINVAL));
for (int i = 0; i < nspares; i++) {
nvlist_t *spare = spares[i];
uint64_t top_guid, spare_id;
const char *type, *path;
/* Skip non-distributed spares */
error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
continue;
/* Skip spares with the wrong name */
error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
if (error != 0 || strcmp(path, spare_name) != 0)
continue;
/* Found the matching spare */
error = nvlist_lookup_uint64(spare,
ZPOOL_CONFIG_TOP_GUID, &top_guid);
if (error == 0) {
error = nvlist_lookup_uint64(spare,
ZPOOL_CONFIG_SPARE_ID, &spare_id);
}
if (error != 0) {
return (SET_ERROR(EINVAL));
} else {
*top_guidp = top_guid;
*spare_idp = spare_id;
return (0);
}
}
return (SET_ERROR(ENOENT));
}
/*
* Initialize private dRAID spare specific fields from the nvlist.
*/
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
vdev_draid_spare_t *vds;
uint64_t top_guid = 0;
uint64_t spare_id;
/*
* In the normal case check the list of spares stored in the spa
 * to look up the top_guid and spare_id for the provided spare config.
* When creating a new pool or adding vdevs the spare list is not
* yet populated and the values are provided in the passed config.
*/
if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
&spare_id) != 0)
return (SET_ERROR(EINVAL));
}
vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
vds->vds_draid_vdev = NULL;
vds->vds_top_guid = top_guid;
vds->vds_spare_id = spare_id;
*tsd = vds;
return (0);
}
static void
vdev_draid_spare_fini(vdev_t *vd)
{
kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}
static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}
vdev_ops_t vdev_draid_spare_ops = {
.vdev_op_init = vdev_draid_spare_init,
.vdev_op_fini = vdev_draid_spare_fini,
.vdev_op_open = vdev_draid_spare_open,
.vdev_op_close = vdev_draid_spare_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_draid_spare_io_start,
.vdev_op_io_done = vdev_draid_spare_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_draid_spare_config_generate,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
.vdev_op_leaf = B_TRUE,
};
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index cd24f97ae7cd..46c1fed6d2c6 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -1,1923 +1,1923 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2014, 2020 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>
#include <sys/fm/fs/zfs.h>
/*
* An indirect vdev corresponds to a vdev that has been removed. Since
* we cannot rewrite block pointers of snapshots, etc., we keep a
* mapping from old location on the removed device to the new location
* on another device in the pool and use this mapping whenever we need
* to access the DVA. Unfortunately, this mapping did not respect
* logical block boundaries when it was first created, and so a DVA on
* this indirect vdev may be "split" into multiple sections that each
* map to a different location. As a consequence, not all DVAs can be
* translated to an equivalent new DVA. Instead we must provide a
* "vdev_remap" operation that executes a callback on each contiguous
* segment of the new location. This function is used in multiple ways:
*
* - I/Os to this vdev use the callback to determine where the
* data is now located, and issue child I/Os for each segment's new
* location.
*
* - frees and claims to this vdev use the callback to free or claim
* each mapped segment. (Note that we don't actually need to claim
* log blocks on indirect vdevs, because we don't allocate to
* removing vdevs. However, zdb uses zio_claim() for its leak
* detection.)
*/
/*
* "Big theory statement" for how we mark blocks obsolete.
*
* When a block on an indirect vdev is freed or remapped, a section of
* that vdev's mapping may no longer be referenced (aka "obsolete"). We
* keep track of how much of each mapping entry is obsolete. When
* an entry becomes completely obsolete, we can remove it, thus reducing
* the memory used by the mapping. The complete picture of obsolescence
* is given by the following data structures, described below:
* - the entry-specific obsolete count
* - the vdev-specific obsolete spacemap
* - the pool-specific obsolete bpobj
*
* == On disk data structures used ==
*
* We track the obsolete space for the pool using several objects. Each
* of these objects is created on demand and freed when no longer
* needed, and is assumed to be empty if it does not exist.
* SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
*
* - Each vic_mapping_object (associated with an indirect vdev) can
* have a vimp_counts_object. This is an array of uint32_t's
* with the same number of entries as the vic_mapping_object. When
* the mapping is condensed, entries from the vic_obsolete_sm_object
* (see below) are folded into the counts. Therefore, each
* obsolete_counts entry tells us the number of bytes in the
* corresponding mapping entry that were not referenced when the
* mapping was last condensed.
*
* - Each indirect or removing vdev can have a vic_obsolete_sm_object.
* This is a space map containing an alloc entry for every DVA that
* has been obsoleted since the last time this indirect vdev was
* condensed. We use this object in order to improve performance
* when marking a DVA as obsolete. Instead of modifying an arbitrary
* offset of the vimp_counts_object, we only need to append an entry
* to the end of this object. When a DVA becomes obsolete, it is
* added to the obsolete space map. This happens when the DVA is
* freed, remapped and not referenced by a snapshot, or the last
* snapshot referencing it is destroyed.
*
* - Each dataset can have a ds_remap_deadlist object. This is a
* deadlist object containing all blocks that were remapped in this
* dataset but referenced in a previous snapshot. Blocks can *only*
* appear on this list if they were remapped (dsl_dataset_block_remapped);
* blocks that were killed in a head dataset are put on the normal
* ds_deadlist and marked obsolete when they are freed.
*
* - The pool can have a dp_obsolete_bpobj. This is a list of blocks
* in the pool that need to be marked obsolete. When a snapshot is
* destroyed, we move some of the ds_remap_deadlist to the obsolete
* bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
* asynchronously process the obsolete bpobj, moving its entries to
* the specific vdevs' obsolete space maps.
*
* == Summary of how we mark blocks as obsolete ==
*
* - When freeing a block: if any DVA is on an indirect vdev, append to
* vic_obsolete_sm_object.
* - When remapping a block, add dva to ds_remap_deadlist (if prev snap
* references; otherwise append to vic_obsolete_sm_object).
* - When freeing a snapshot: move parts of ds_remap_deadlist to
* dp_obsolete_bpobj (same algorithm as ds_deadlist).
* - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
* individual vdev's vic_obsolete_sm_object.
*/
/*
* "Big theory statement" for how we condense indirect vdevs.
*
* Condensing an indirect vdev's mapping is the process of determining
* the precise counts of obsolete space for each mapping entry (by
* integrating the obsolete spacemap into the obsolete counts) and
* writing out a new mapping that contains only referenced entries.
*
* We condense a vdev when we expect the mapping to shrink (see
* vdev_indirect_should_condense()), but only perform one condense at a
* time to limit the memory usage. In addition, we use a separate
* open-context thread (spa_condense_indirect_thread) to incrementally
* create the new mapping object in a way that minimizes the impact on
* the rest of the system.
*
* == Generating a new mapping ==
*
* To generate a new mapping, we follow these steps:
*
* 1. Save the old obsolete space map and create a new mapping object
* (see spa_condense_indirect_start_sync()). This initializes the
* spa_condensing_indirect_phys with the "previous obsolete space map",
* which is now read only. Newly obsolete DVAs will be added to a
* new (initially empty) obsolete space map, and will not be
* considered as part of this condense operation.
*
* 2. Construct in memory the precise counts of obsolete space for each
* mapping entry, by incorporating the obsolete space map into the
* counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
*
* 3. Iterate through each mapping entry, writing to the new mapping any
* entries that are not completely obsolete (i.e. which don't have
* obsolete count == mapping length). (See
* spa_condense_indirect_generate_new_mapping().)
*
* 4. Destroy the old mapping object and switch over to the new one
* (spa_condense_indirect_complete_sync).
*
* == Restarting from failure ==
*
* To restart the condense when we import/open the pool, we must start
* at the 2nd step above: reconstruct the precise counts in memory,
* based on the space map + counts. Then in the 3rd step, we start
* iterating where we left off: at vimp_max_offset of the new mapping
* object.
*/
static int zfs_condense_indirect_vdevs_enable = B_TRUE;
/*
* Condense if at least this percent of the bytes in the mapping is
* obsolete. With the default of 25%, the amount of space mapped
* will be reduced to 1% of its original size after at most 16
* condenses. Higher values will condense less often (causing less
* i/o); lower values will reduce the mapping size more quickly.
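 * (Each condense leaves at most 75% of the previously mapped bytes, and
 * 0.75^16 is roughly 1%.)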
*/
static uint_t zfs_condense_indirect_obsolete_pct = 25;
/*
* Condense if the obsolete space map takes up more than this amount of
* space on disk (logically). This limits the amount of disk space
* consumed by the obsolete space map; the default of 1GB is small enough
* that we typically don't mind "wasting" it.
*/
static uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
/*
* Don't bother condensing if the mapping uses less than this amount of
* memory. The default of 128KB is considered a "trivial" amount of
* memory and not worth reducing.
*/
static uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a condense (which might otherwise
* complete too quickly). If used to reduce the performance impact of
* condensing in production, a maximum value of 1 should be sufficient.
*/
static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
/*
* If an indirect split block contains more than this many possible unique
* combinations when being reconstructed, consider it too computationally
* expensive to check them all. Instead, try at most 100 randomly-selected
* combinations each time the block is accessed. This allows all segment
* copies to participate fairly in the reconstruction when all combinations
* cannot be checked and prevents repeated use of one bad copy.
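 * (The combination count is roughly the product of the number of unique
 * copies of each split segment; e.g. three segments with two unique copies
 * each give 2 * 2 * 2 = 8 combinations.)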
*/
uint_t zfs_reconstruct_indirect_combinations_max = 4096;
/*
* Enable to simulate damaged segments and validate reconstruction. This
* is intentionally not exposed as a module parameter.
*/
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
/*
* The indirect_child_t represents the vdev that we will read from, when we
* need to read all copies of the data (e.g. for scrub or reconstruction).
* For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
* ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
* ic_vdev is a child of the mirror.
*/
typedef struct indirect_child {
abd_t *ic_data;
vdev_t *ic_vdev;
/*
 * ic_duplicate is NULL when the ic_data contents are unique; when it
* is determined to be a duplicate it references the primary child.
*/
struct indirect_child *ic_duplicate;
list_node_t ic_node; /* node on is_unique_child */
int ic_error; /* set when a child does not contain the data */
} indirect_child_t;
/*
* The indirect_split_t represents one mapped segment of an i/o to the
* indirect vdev. For non-split (contiguously-mapped) blocks, there will be
* only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
* For split blocks, there will be several of these.
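 * For example, a 128K block mapped as two 64K extents is tracked by two
 * indirect_split_t's: the first with is_split_offset == 0, the second with
 * is_split_offset == 64K (the sum of the preceding is_size's).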
*/
typedef struct indirect_split {
list_node_t is_node; /* link on iv_splits */
/*
* is_split_offset is the offset into the i/o.
* This is the sum of the previous splits' is_size's.
*/
uint64_t is_split_offset;
vdev_t *is_vdev; /* top-level vdev */
uint64_t is_target_offset; /* offset on is_vdev */
uint64_t is_size;
int is_children; /* number of entries in is_child[] */
int is_unique_children; /* number of entries in is_unique_child */
list_t is_unique_child;
/*
* is_good_child is the child that we are currently using to
* attempt reconstruction.
*/
indirect_child_t *is_good_child;
indirect_child_t is_child[];
} indirect_split_t;
/*
* The indirect_vsd_t is associated with each i/o to the indirect vdev.
* It is the "Vdev-Specific Data" in the zio_t's io_vsd.
*/
typedef struct indirect_vsd {
boolean_t iv_split_block;
boolean_t iv_reconstruct;
uint64_t iv_unique_combinations;
uint64_t iv_attempts;
uint64_t iv_attempts_max;
list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
static void
vdev_indirect_map_free(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
indirect_split_t *is;
while ((is = list_remove_head(&iv->iv_splits)) != NULL) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data != NULL)
abd_free(ic->ic_data);
}
indirect_child_t *ic;
while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
;
list_destroy(&is->is_unique_child);
kmem_free(is,
offsetof(indirect_split_t, is_child[is->is_children]));
}
kmem_free(iv, sizeof (*iv));
}
static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
.vsd_free = vdev_indirect_map_free,
};
/*
* Mark the given offset and size as being obsolete.
*/
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(size > 0);
VERIFY(vdev_indirect_mapping_entry_for_offset(
vd->vdev_indirect_mapping, offset) != NULL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
mutex_enter(&vd->vdev_obsolete_lock);
- range_tree_add(vd->vdev_obsolete_segments, offset, size);
+ zfs_range_tree_add(vd->vdev_obsolete_segments, offset, size);
mutex_exit(&vd->vdev_obsolete_lock);
vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
}
}
/*
* Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
* wrapper is provided because the DMU does not know about vdev_t's and
* cannot directly call vdev_indirect_mark_obsolete.
*/
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
ASSERT(dmu_tx_is_syncing(tx));
/* The DMU can only remap indirect vdevs. */
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vdev_indirect_mark_obsolete(vd, offset, size);
}
static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
objset_t *mos = spa->spa_meta_objset;
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&sci->sci_new_mapping_entries[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
sci->sci_new_mapping =
vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
return (sci);
}
static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
for (int i = 0; i < TXG_SIZE; i++)
list_destroy(&sci->sci_new_mapping_entries[i]);
if (sci->sci_new_mapping != NULL)
vdev_indirect_mapping_close(sci->sci_new_mapping);
kmem_free(sci, sizeof (*sci));
}
boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
if (!zfs_condense_indirect_vdevs_enable)
return (B_FALSE);
/*
* We can only condense one indirect vdev at a time.
*/
if (spa->spa_condensing_indirect != NULL)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
/*
* The mapping object size must not change while we are
* condensing, so we can only condense indirect vdevs
* (not vdevs that are still in the middle of being removed).
*/
if (vd->vdev_ops != &vdev_indirect_ops)
return (B_FALSE);
/*
* If nothing new has been marked obsolete, there is no
* point in condensing.
*/
uint64_t obsolete_sm_obj __maybe_unused;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
if (vd->vdev_obsolete_sm == NULL) {
ASSERT0(obsolete_sm_obj);
return (B_FALSE);
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
uint64_t mapping_size = vdev_indirect_mapping_size(vim);
uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
ASSERT3U(bytes_obsolete, <=, bytes_mapped);
/*
* If a high percentage of the bytes that are mapped have become
* obsolete, condense (unless the mapping is already small enough).
* This has a good chance of reducing the amount of memory used
* by the mapping.
*/
if (bytes_obsolete * 100 / bytes_mapped >=
zfs_condense_indirect_obsolete_pct &&
mapping_size > zfs_condense_min_mapping_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete "
"spacemap covers %d%% of %lluMB mapping",
(u_longlong_t)vd->vdev_id,
(int)(bytes_obsolete * 100 / bytes_mapped),
(u_longlong_t)bytes_mapped / 1024 / 1024);
return (B_TRUE);
}
/*
* If the obsolete space map takes up too much space on disk,
* condense in order to free up this disk space.
*/
if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete sm "
"length %lluMB >= max size %lluMB",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)obsolete_sm_size / 1024 / 1024,
(u_longlong_t)zfs_condense_max_obsolete_bytes /
1024 / 1024);
return (B_TRUE);
}
return (B_FALSE);
}
/*
* This sync task completes (finishes) a condense, deleting the old
* mapping and replacing it with the new one.
*/
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_meta_objset;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
uint64_t new_count =
vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
/*
* Reset vdev_indirect_mapping to refer to the new object.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = sci->sci_new_mapping;
rw_exit(&vd->vdev_indirect_rwlock);
sci->sci_new_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = scip->scip_next_mapping_object;
scip->scip_next_mapping_object = 0;
space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
scip->scip_prev_obsolete_sm_object = 0;
scip->scip_vdev = 0;
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, tx));
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
"new mapping object %llu has %llu entries "
"(was %llu entries)",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)vic->vic_mapping_object,
(u_longlong_t)new_count, (u_longlong_t)old_count);
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* This sync task appends entries to the new mapping object.
*/
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
&sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}
/*
* Open-context function to add one entry to the new mapping. The new
* entry will be remembered and written from syncing context.
*/
static void
spa_condense_indirect_commit_entry(spa_t *spa,
vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
/*
* If we are the first entry committed this txg, kick off the sync
* task to write to the MOS on our behalf.
*/
if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
dsl_sync_task_nowait(dmu_tx_pool(tx),
spa_condense_indirect_commit_sync, sci, tx);
}
vdev_indirect_mapping_entry_t *vime =
kmem_alloc(sizeof (*vime), KM_SLEEP);
vime->vime_mapping = *vimep;
vime->vime_obsolete_count = count;
list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
dmu_tx_commit(tx);
}
static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
spa_t *spa = vd->vdev_spa;
uint64_t mapi = start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_num_entries =
vdev_indirect_mapping_num_entries(old_mapping);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
zfs_dbgmsg("starting condense of vdev %llu from index %llu",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
while (mapi < old_num_entries) {
if (zthr_iscancelled(zthr)) {
zfs_dbgmsg("pausing condense of vdev %llu "
"at index %llu", (u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
break;
}
vdev_indirect_mapping_entry_phys_t *entry =
&old_mapping->vim_entries[mapi];
uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
ASSERT3U(obsolete_counts[mapi], <=, entry_size);
if (obsolete_counts[mapi] < entry_size) {
spa_condense_indirect_commit_entry(spa, entry,
obsolete_counts[mapi]);
/*
* This delay may be requested for testing, debugging,
* or performance reasons.
*/
hrtime_t now = gethrtime();
hrtime_t sleep_until = now + MSEC2NSEC(
zfs_condense_indirect_commit_entry_delay_ms);
zfs_sleep_until(sleep_until);
}
mapi++;
}
}
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
(void) zthr;
spa_t *spa = arg;
return (spa->spa_condensing_indirect != NULL);
}
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *vd;
ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint32_t *counts;
uint64_t start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
space_map_t *prev_obsolete_sm = NULL;
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
/*
* The list must start out empty in order for the
* _commit_sync() sync task to be properly registered
* on the first call to _commit_entry(); so it's wise
* to double check and ensure we actually are starting
* with empty lists.
*/
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
if (prev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
counts, prev_obsolete_sm);
}
space_map_close(prev_obsolete_sm);
/*
* Generate new mapping. Determine what index to continue from
* based on the max offset that we've already written in the
* new mapping.
*/
uint64_t max_offset =
vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
if (max_offset == 0) {
/* We haven't written anything to the new mapping yet. */
start_index = 0;
} else {
/*
* Pick up from where we left off. _entry_for_offset()
* returns a pointer into the vim_entries array. If
* max_offset is greater than any of the mappings
* contained in the table NULL will be returned and
* that indicates we've exhausted our iteration of the
* old_mapping.
*/
vdev_indirect_mapping_entry_phys_t *entry =
vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
max_offset);
if (entry == NULL) {
/*
* We've already written the whole new mapping.
* This special value will cause us to skip the
* generate_new_mapping step and just do the sync
* task to complete the condense.
*/
start_index = UINT64_MAX;
} else {
start_index = entry - old_mapping->vim_entries;
ASSERT3U(start_index, <,
vdev_indirect_mapping_num_entries(old_mapping));
}
}
spa_condense_indirect_generate_new_mapping(vd, counts,
start_index, zthr);
vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
/*
* If the zthr has received a cancellation signal while running
* in generate_new_mapping() or at any point after that, then bail
* early. We don't want to complete the condense if the spa is
* shutting down.
*/
if (zthr_iscancelled(zthr))
return;
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
spa_condense_indirect_complete_sync, sci, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
/*
* Sync task to begin the condensing process.
*/
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
ASSERT0(scip->scip_next_mapping_object);
ASSERT0(scip->scip_prev_obsolete_sm_object);
ASSERT0(scip->scip_vdev);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
uint64_t obsolete_sm_obj;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
ASSERT3U(obsolete_sm_obj, !=, 0);
scip->scip_vdev = vd->vdev_id;
scip->scip_next_mapping_object =
vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
/*
* We don't need to allocate a new space map object, since
* vdev_indirect_sync_obsolete will allocate one when needed.
*/
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
"posm=%llu nm=%llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)scip->scip_prev_obsolete_sm_object,
(u_longlong_t)scip->scip_next_mapping_object);
zthr_wakeup(spa->spa_condense_zthr);
}
/*
* Sync to the given vdev's obsolete space map any segments that are no longer
* referenced as of the given txg.
*
* If the obsolete space map doesn't exist yet, create and open it.
*/
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
ASSERT3U(vic->vic_mapping_object, !=, 0);
- ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
+ ASSERT(zfs_range_tree_space(vd->vdev_obsolete_segments) > 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object == 0) {
obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
zfs_vdev_standard_sm_blksz, tx);
ASSERT(vd->vdev_top_zap != 0);
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
ASSERT3U(obsolete_sm_object, !=, 0);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
spa->spa_meta_objset, obsolete_sm_object,
0, vd->vdev_asize, 0));
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_write(vd->vdev_obsolete_sm,
vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
- range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
+ zfs_range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}
int
spa_condense_init(spa_t *spa)
{
int error = zap_lookup(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
&spa->spa_condensing_indirect_phys);
if (error == 0) {
if (spa_writeable(spa)) {
spa->spa_condensing_indirect =
spa_condensing_indirect_create(spa);
}
return (0);
} else if (error == ENOENT) {
return (0);
} else {
return (error);
}
}
void
spa_condense_fini(spa_t *spa)
{
if (spa->spa_condensing_indirect != NULL) {
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
}
}
void
spa_start_indirect_condensing_thread(spa_t *spa)
{
ASSERT3P(spa->spa_condense_zthr, ==, NULL);
spa->spa_condense_zthr = zthr_create("z_indirect_condense",
spa_condense_indirect_thread_check,
spa_condense_indirect_thread, spa, minclsyspri);
}
/*
* Gets the obsolete spacemap object from the vdev's ZAP. On success sm_obj
* will contain either the obsolete spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
/*
 * Gets, from the vdev's ZAP, whether the obsolete counts are precise.
* On success are_precise will be set to reflect if the counts are precise.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*are_precise = B_FALSE;
return (0);
}
uint64_t val = 0;
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
if (error == 0) {
*are_precise = (val != 0);
} else if (error == ENOENT) {
*are_precise = B_FALSE;
error = 0;
}
return (error);
}
static void
vdev_indirect_close(vdev_t *vd)
{
(void) vd;
}
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
*psize = *max_psize = vd->vdev_asize +
VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*logical_ashift = vd->vdev_ashift;
*physical_ashift = vd->vdev_physical_ashift;
return (0);
}
typedef struct remap_segment {
vdev_t *rs_vd;
uint64_t rs_offset;
uint64_t rs_asize;
uint64_t rs_split_offset;
list_node_t rs_node;
} remap_segment_t;
static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
rs->rs_vd = vd;
rs->rs_offset = offset;
rs->rs_asize = asize;
rs->rs_split_offset = split_offset;
return (rs);
}
/*
* Given an indirect vdev and an extent on that vdev, it duplicates the
* physical entries of the indirect mapping that correspond to the extent
* to a new array and returns a pointer to it. In addition, copied_entries
* is populated with the number of mapping entries that were duplicated.
*
* Note that the function assumes that the caller holds vdev_indirect_rwlock.
* This ensures that the mapping won't change due to condensing as we
* copy over its contents.
*
* Finally, since we are doing an allocation, it is up to the caller to
* free the array allocated in this function.
*/
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
uint64_t asize, uint64_t *copied_entries)
{
vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t entries = 0;
ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
vdev_indirect_mapping_entry_phys_t *first_mapping =
vdev_indirect_mapping_entry_for_offset(vim, offset);
ASSERT3P(first_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *m = first_mapping;
while (asize > 0) {
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size = MIN(asize, size - inner_offset);
offset += inner_size;
asize -= inner_size;
entries++;
m++;
}
size_t copy_length = entries * sizeof (*first_mapping);
duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
memcpy(duplicate_mappings, first_mapping, copy_length);
*copied_entries = entries;
return (duplicate_mappings);
}
/*
* Goes through the relevant indirect mappings until it hits a concrete vdev
* and issues the callback. On the way to the concrete vdev, if any other
* indirect vdevs are encountered, then the callback will also be called on
* each of those indirect vdevs. For example, if the segment is mapped to
* segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
* mapped to segment B on concrete vdev 2, then the callback will be called on
* both vdev 1 and vdev 2.
*
* While the callback passed to vdev_indirect_remap() is called on every vdev
* the function encounters, certain callbacks only care about concrete vdevs.
* These types of callbacks should return immediately and explicitly when they
* are called on an indirect vdev.
*
* Because there is a possibility that a DVA section in the indirect device
* has been split into multiple sections in our mapping, we keep track
* of the relevant contiguous segments of the new location (remap_segment_t)
* in a stack. This way we can call the callback for each of the new sections
* created by a single section of the indirect device. Note though, that in
* this scenario the callbacks in each split block won't occur in-order in
* terms of offset, so callers should not make any assumptions about that.
*
* For callbacks that don't handle split blocks and immediately return when
* they encounter them (as is the case for remap_blkptr_cb), the caller can
* assume that its callback will be applied from the first indirect vdev
* encountered to the last one and then the concrete vdev, in that order.
*/
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
list_t stack;
spa_t *spa = vd->vdev_spa;
list_create(&stack, sizeof (remap_segment_t),
offsetof(remap_segment_t, rs_node));
for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
rs != NULL; rs = list_remove_head(&stack)) {
vdev_t *v = rs->rs_vd;
uint64_t num_entries = 0;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
ASSERT(rs->rs_asize > 0);
/*
* Note: As this function can be called from open context
* (e.g. zio_read()), we need the following rwlock to
* prevent the mapping from being changed by condensing.
*
* So we grab the lock and we make a copy of the entries
* that are relevant to the extent that we are working on.
* Once that is done, we drop the lock and iterate over
 * our copy of the mapping. Once we are done with the remap
 * segment and we free it, we also free our copy
* of the indirect mapping entries that are relevant to it.
*
* This way we don't need to wait until the function is
* finished with a segment, to condense it. In addition, we
* don't need a recursive rwlock for the case that a call to
* vdev_indirect_remap() needs to call itself (through the
* codepath of its callback) for the same vdev in the middle
* of its execution.
*/
rw_enter(&v->vdev_indirect_rwlock, RW_READER);
ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *mapping =
vdev_indirect_mapping_duplicate_adjacent_entries(v,
rs->rs_offset, rs->rs_asize, &num_entries);
ASSERT3P(mapping, !=, NULL);
ASSERT3U(num_entries, >, 0);
rw_exit(&v->vdev_indirect_rwlock);
for (uint64_t i = 0; i < num_entries; i++) {
/*
* Note: the vdev_indirect_mapping can not change
* while we are running. It only changes while the
* removal is in progress, and then only from syncing
* context. While a removal is in progress, this
* function is only called for frees, which also only
* happen from syncing context.
*/
vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
ASSERT3P(m, !=, NULL);
ASSERT3U(rs->rs_asize, >, 0);
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
ASSERT3U(rs->rs_offset, >=,
DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(rs->rs_offset, <,
DVA_MAPPING_GET_SRC_OFFSET(m) + size);
ASSERT3U(dst_vdev, !=, v->vdev_id);
uint64_t inner_offset = rs->rs_offset -
DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size =
MIN(rs->rs_asize, size - inner_offset);
vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
ASSERT3P(dst_v, !=, NULL);
if (dst_v->vdev_ops == &vdev_indirect_ops) {
list_insert_head(&stack,
rs_alloc(dst_v, dst_offset + inner_offset,
inner_size, rs->rs_split_offset));
}
if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
/*
 * Note: This clause exists solely for
* testing purposes. We use it to ensure that
* split blocks work and that the callbacks
* using them yield the same result if issued
* in reverse order.
*/
uint64_t inner_half = inner_size / 2;
func(rs->rs_split_offset + inner_half, dst_v,
dst_offset + inner_offset + inner_half,
inner_half, arg);
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_half, arg);
} else {
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_size, arg);
}
rs->rs_offset += inner_size;
rs->rs_asize -= inner_size;
rs->rs_split_offset += inner_size;
}
VERIFY0(rs->rs_asize);
kmem_free(mapping, num_entries * sizeof (*mapping));
kmem_free(rs, sizeof (remap_segment_t));
}
list_destroy(&stack);
}
static void
vdev_indirect_child_io_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
mutex_enter(&pio->io_lock);
pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
mutex_exit(&pio->io_lock);
abd_free(zio->io_abd);
}
/*
* This is a callback for vdev_indirect_remap() which allocates an
* indirect_split_t for each split segment and adds it to iv_splits.
*/
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
zio_t *zio = arg;
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3P(vd, !=, NULL);
if (vd->vdev_ops == &vdev_indirect_ops)
return;
int n = 1;
if (vd->vdev_ops == &vdev_mirror_ops)
n = vd->vdev_children;
indirect_split_t *is =
kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
is->is_children = n;
is->is_size = size;
is->is_split_offset = split_offset;
is->is_target_offset = offset;
is->is_vdev = vd;
list_create(&is->is_unique_child, sizeof (indirect_child_t),
offsetof(indirect_child_t, ic_node));
/*
* Note that we only consider multiple copies of the data for
* *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
* though they use the same ops as mirror, because there's only one
* "good" copy under the replacing/spare.
*/
if (vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < n; i++) {
is->is_child[i].ic_vdev = vd->vdev_child[i];
list_link_init(&is->is_child[i].ic_node);
}
} else {
is->is_child[0].ic_vdev = vd;
}
list_insert_tail(&iv->iv_splits, is);
}
static void
vdev_indirect_read_split_done(zio_t *zio)
{
indirect_child_t *ic = zio->io_private;
if (zio->io_error != 0) {
/*
* Clear ic_data to indicate that we do not have data for this
* child.
*/
abd_free(ic->ic_data);
ic->ic_data = NULL;
}
}
/*
* Issue reads for all copies (mirror children) of all splits.
*/
static void
vdev_indirect_read_all(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (!vdev_readable(ic->ic_vdev))
continue;
/*
* If a child is missing the data, set ic_error. Used
 * in vdev_indirect_repair(). We nevertheless perform the
 * read, which provides the opportunity to reconstruct the
 * split block if at all possible.
*/
if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
zio->io_txg, 1))
ic->ic_error = SET_ERROR(ESTALE);
ic->ic_data = abd_alloc_sametype(zio->io_abd,
is->is_size);
ic->ic_duplicate = NULL;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset, ic->ic_data,
is->is_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_read_split_done, ic));
}
}
iv->iv_reconstruct = B_TRUE;
}
static void
vdev_indirect_io_start(zio_t *zio)
{
spa_t *spa __maybe_unused = zio->io_spa;
indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
list_create(&iv->iv_splits,
sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
zio->io_vsd = iv;
zio->io_vsd_ops = &vdev_indirect_vsd_ops;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (zio->io_type != ZIO_TYPE_READ) {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
/*
* Note: this code can handle other kinds of writes,
* but we don't expect them.
*/
ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
}
vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
vdev_indirect_gather_splits, zio);
indirect_split_t *first = list_head(&iv->iv_splits);
ASSERT3P(first, !=, NULL);
if (first->is_size == zio->io_size) {
/*
* This is not a split block; we are pointing to the entire
* data, which will checksum the same as the original data.
* Pass the BP down so that the child i/o can verify the
* checksum, and try a different location if available
* (e.g. on a mirror).
*
* While this special case could be handled the same as the
* general (split block) case, doing it this way ensures
* that the vast majority of blocks on indirect vdevs
* (which are not split) are handled identically to blocks
* on non-indirect vdevs. This allows us to be less strict
* about performance in the general (but rare) case.
*/
ASSERT0(first->is_split_offset);
ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
first->is_vdev, first->is_target_offset,
abd_get_offset(zio->io_abd, 0),
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
} else {
iv->iv_split_block = B_TRUE;
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
/*
* Read all copies. Note that for simplicity,
* we don't bother consulting the DTL in the
* resilver case.
*/
vdev_indirect_read_all(zio);
} else {
/*
* If this is a read zio, we read one copy of each
* split segment, from the top-level vdev. Since
* we don't know the checksum of each split
* individually, the child zio can't ensure that
* we get the right data. E.g. if it's a mirror,
* it will just read from a random (healthy) leaf
* vdev. We have to verify the checksum in
* vdev_indirect_io_done().
*
* For write zios, the vdev code will ensure we write
* to all children.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
zio_nowait(zio_vdev_child_io(zio, NULL,
is->is_vdev, is->is_target_offset,
abd_get_offset_size(zio->io_abd,
is->is_split_offset, is->is_size),
is->is_size, zio->io_type,
zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
}
}
}
zio_execute(zio);
}
/*
* Report a checksum error for a child.
*/
static void
vdev_indirect_checksum_error(zio_t *zio,
indirect_split_t *is, indirect_child_t *ic)
{
vdev_t *vd = ic->ic_vdev;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
zio_bad_cksum_t zbc = { 0 };
abd_t *bad_abd = ic->ic_data;
abd_t *good_abd = is->is_good_child->ic_data;
(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
}
/*
* Issue repair i/os for any incorrect copies. We do this by comparing
* each split segment's correct data (is_good_child's ic_data) with each
* other copy of the data. If they differ, then we overwrite the bad data
* with the good copy. The DTL is checked in vdev_indirect_read_all() and,
* if a vdev is missing a copy of the data, we set ic_error but still perform
* the read. This provides the opportunity to reconstruct the split block
* if at all possible. ic_error is checked here and, if set, it suppresses
* incrementing the checksum counter. Aside from this, DTLs are not checked,
* which simplifies this code and also issues the optimal number of writes
* (based on which copies actually read bad data, as opposed to which we
* think might be wrong). For the same reason, we always use
* ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
*/
static void
vdev_indirect_repair(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (!spa_writeable(zio->io_spa))
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
if (ic->ic_duplicate == is->is_good_child)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset,
is->is_good_child->ic_data, is->is_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
NULL, NULL));
/*
* If ic_error is set the current child does not have
* a copy of the data, so suppress incrementing the
* checksum counter.
*/
if (ic->ic_error == ESTALE)
continue;
vdev_indirect_checksum_error(zio, is, ic);
}
}
}
/*
* Report checksum errors on all children that we read from.
*/
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data == NULL)
continue;
vdev_t *vd = ic->ic_vdev;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
NULL, zio, is->is_target_offset, is->is_size,
NULL, NULL, NULL);
}
}
}
/*
* Copy data from all the splits to the main zio, then validate the checksum.
* If the checksum is successfully validated, return success.
*/
static int
vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
{
zio_bad_cksum_t zbc;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
ASSERT3P(is->is_good_child->ic_data, !=, NULL);
ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
is->is_split_offset, 0, is->is_size);
}
return (zio_checksum_error(zio, &zbc));
}
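/*
 * Illustrative sketch (hypothetical userland code, not ZFS code): a
 * reconstruction candidate is validated by copying each split's chosen
 * copy into one contiguous buffer at its split offset and then
 * checksumming the whole buffer, which is what the function above does
 * with ABDs and the real block checksum.  All names below are made up,
 * and the additive checksum is only a stand-in.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct split_copy {
	uint64_t	sc_split_offset;	/* offset within the block */
	const uint8_t	*sc_data;		/* chosen copy of this split */
	uint64_t	sc_size;
};

static uint64_t
toy_checksum(const uint8_t *buf, uint64_t len)
{
	uint64_t sum = 0;
	for (uint64_t i = 0; i < len; i++)
		sum = sum * 31 + buf[i];
	return (sum);
}

/* Returns 0 when the reassembled candidate matches the expected checksum. */
int
candidate_validate(const struct split_copy *splits, int nsplits,
    uint64_t block_size, uint64_t expected_cksum)
{
	uint8_t *block = calloc(1, block_size);
	if (block == NULL)
		return (-1);
	for (int i = 0; i < nsplits; i++)
		memcpy(block + splits[i].sc_split_offset,
		    splits[i].sc_data, splits[i].sc_size);
	int match = (toy_checksum(block, block_size) == expected_cksum);
	free(block);
	return (match ? 0 : 1);
}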
/*
* There are relatively few possible combinations, making it feasible to
* deterministically check them all. We do this by advancing each split's
* good_child to its next unique copy. When a split's list is exhausted we
* wrap it around and "carry over" into the next split, like counting in a
* mixed radix where each digit's base is that split's is_unique_children
* (see the sketch after this function).
*/
static int
vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
{
boolean_t more = B_TRUE;
iv->iv_attempts = 0;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is))
is->is_good_child = list_head(&is->is_unique_child);
while (more == B_TRUE) {
iv->iv_attempts++;
more = B_FALSE;
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_good_child = list_next(&is->is_unique_child,
is->is_good_child);
if (is->is_good_child != NULL) {
more = B_TRUE;
break;
}
is->is_good_child = list_head(&is->is_unique_child);
}
}
ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
return (SET_ERROR(ECKSUM));
}
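/*
 * Illustrative sketch (hypothetical userland code, not ZFS code): the loop
 * above advances the per-split good_child pointers like an odometer in
 * which each digit has its own base -- the number of unique copies for
 * that split.  The same counting scheme over plain integers, with made-up
 * names:
 */
#include <stdbool.h>
#include <stdio.h>

static bool
odometer_next(int *digit, const int *base, int ndigits)
{
	for (int i = 0; i < ndigits; i++) {
		if (++digit[i] < base[i])
			return (true);	/* no carry; a new combination */
		digit[i] = 0;		/* wrapped; carry into the next digit */
	}
	return (false);			/* every digit wrapped; enumeration done */
}

int
main(void)
{
	int base[3] = { 2, 3, 2 };	/* unique copies per split segment */
	int digit[3] = { 0, 0, 0 };
	do {
		printf("%d %d %d\n", digit[0], digit[1], digit[2]);
	} while (odometer_next(digit, base, 3));
	return (0);			/* prints all 2*3*2 = 12 combinations */
}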
/*
* There are too many combinations to try all of them in a reasonable amount
* of time. So try a fixed number of random combinations from the unique
* split versions, after which we'll consider the block unrecoverable.
*/
static int
vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
{
iv->iv_attempts = 0;
while (iv->iv_attempts < iv->iv_attempts_max) {
iv->iv_attempts++;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic = list_head(&is->is_unique_child);
int children = is->is_unique_children;
for (int i = random_in_range(children); i > 0; i--)
ic = list_next(&is->is_unique_child, ic);
ASSERT3P(ic, !=, NULL);
is->is_good_child = ic;
}
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
}
return (SET_ERROR(ECKSUM));
}
/*
* This is a validation function for reconstruction. It randomly selects
* a good combination, if one can be found, and then it intentionally
* damages all other segment copies by zeroing them. This forces the
* reconstruction algorithm to locate the one remaining known good copy.
*/
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
int error;
/* Presume all the copies are unique for initial selection. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (ic->ic_data != NULL) {
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic);
}
}
if (list_is_empty(&is->is_unique_child)) {
error = SET_ERROR(EIO);
goto out;
}
}
/*
* Set each is_good_child to a randomly-selected child which
* is known to contain validated data.
*/
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error)
goto out;
/*
* Damage all but the known good copy by zeroing them. This will
* result in at most two unique copies of each split segment.
* Both may need to be checked in order to reconstruct the block.
* Set iv->iv_attempts_max such that all unique combinations will
* be enumerated, but limit the damage to at most 12 indirect splits.
*/
iv->iv_attempts_max = 1;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
}
iv->iv_attempts_max *= 2;
if (iv->iv_attempts_max >= (1ULL << 12)) {
iv->iv_attempts_max = UINT64_MAX;
break;
}
}
out:
/* Empty the unique children lists so they can be reconstructed. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic;
while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
;
is->is_unique_children = 0;
}
return (error);
}
/*
* This function is called when we have read all copies of the data and need
* to try to find a combination of copies that gives us the right checksum.
*
* If we pointed to any mirror vdevs, this effectively does the job of the
* mirror. The mirror vdev code can't do its own job because we don't know
* the checksum of each split segment individually.
*
* We have to try every unique combination of copies of split segments, until
* we find one that checksums correctly. Duplicate segment copies are first
* identified and later skipped during reconstruction. This optimization
* reduces the search space and ensures that of the remaining combinations
* at most one is correct.
*
* When the total number of combinations is small they can all be checked.
* For example, if we have 3 segments in the split, and each points to a
* 2-way mirror with unique copies, we will have the following pieces of data:
*
* | mirror child
* split | [0] [1]
* ======|=====================
* A | data_A_0 data_A_1
* B | data_B_0 data_B_1
* C | data_C_0 data_C_1
*
* We will try the following (mirror children)^(number of splits) (2^3=8)
* combinations, which is similar to bitwise-little-endian counting in
* binary. In general each "digit" corresponds to a split segment, and the
* base of each digit is is_children, which can be different for each
* digit.
*
* "low bit" "high bit"
* v v
* data_A_0 data_B_0 data_C_0
* data_A_1 data_B_0 data_C_0
* data_A_0 data_B_1 data_C_0
* data_A_1 data_B_1 data_C_0
* data_A_0 data_B_0 data_C_1
* data_A_1 data_B_0 data_C_1
* data_A_0 data_B_1 data_C_1
* data_A_1 data_B_1 data_C_1
*
* Note that the split segments may be on the same or different top-level
* vdevs. In either case, we may need to try lots of combinations (see
* zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror
* has small silent errors on all of its children, we can still reconstruct
* the correct data, as long as those errors are at sufficiently-separated
* offsets (specifically, separated by the largest block size - default of
* 128KB, but up to 16MB).
*/
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
boolean_t known_good = B_FALSE;
int error;
iv->iv_unique_combinations = 1;
iv->iv_attempts_max = UINT64_MAX;
if (zfs_reconstruct_indirect_combinations_max > 0)
iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
/*
* If nonzero, roughly one in every zfs_reconstruct_indirect_damage_fraction
* blocks is deliberately damaged, in order to validate reconstruction when
* there are split segments with damaged copies. known_good will be TRUE
* when reconstruction is known to be possible.
*/
if (zfs_reconstruct_indirect_damage_fraction != 0 &&
random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
/*
* Determine the unique children for a split segment and add them
* to the is_unique_child list. By restricting reconstruction
* to these children, only unique combinations will be considered.
* This can vastly reduce the search space when there are a large
* number of indirect splits.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic_i = &is->is_child[i];
if (ic_i->ic_data == NULL ||
ic_i->ic_duplicate != NULL)
continue;
for (int j = i + 1; j < is->is_children; j++) {
indirect_child_t *ic_j = &is->is_child[j];
if (ic_j->ic_data == NULL ||
ic_j->ic_duplicate != NULL)
continue;
if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
ic_j->ic_duplicate = ic_i;
}
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic_i);
}
/* Reconstruction is impossible, no valid children */
EQUIV(list_is_empty(&is->is_unique_child),
is->is_unique_children == 0);
if (list_is_empty(&is->is_unique_child)) {
zio->io_error = EIO;
vdev_indirect_all_checksum_errors(zio);
zio_checksum_verified(zio);
return;
}
iv->iv_unique_combinations *= is->is_unique_children;
}
if (iv->iv_unique_combinations <= iv->iv_attempts_max)
error = vdev_indirect_splits_enumerate_all(iv, zio);
else
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error != 0) {
/* All attempted combinations failed. */
ASSERT3B(known_good, ==, B_FALSE);
zio->io_error = error;
vdev_indirect_all_checksum_errors(zio);
} else {
/*
* The checksum has been successfully validated. Issue
* repair I/Os to any copies of splits which don't match
* the validated version.
*/
ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
vdev_indirect_repair(zio);
zio_checksum_verified(zio);
}
}
static void
vdev_indirect_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (iv->iv_reconstruct) {
/*
* We have read all copies of the data (e.g. from mirrors),
* either because this was a scrub/resilver, or because the
* one-copy read didn't checksum correctly.
*/
vdev_indirect_reconstruct_io_done(zio);
return;
}
if (!iv->iv_split_block) {
/*
* This was not a split block, so we passed the BP down,
* and the checksum was handled by the (one) child zio.
*/
return;
}
zio_bad_cksum_t zbc;
int ret = zio_checksum_error(zio, &zbc);
/*
* Any Direct I/O read that has a checksum error must be treated as
* suspicious as the contents of the buffer could be getting
* manipulated while the I/O is taking place. The checksum verify error
* will be reported to the top-level VDEV.
*/
if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
zio->io_error = ret;
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
zio_dio_chksum_verify_error_report(zio);
ret = 0;
}
if (ret == 0) {
zio_checksum_verified(zio);
return;
}
/*
* The checksum didn't match. Read all copies of all splits, and
* then we will try to reconstruct. The next time
* vdev_indirect_io_done() is called, iv_reconstruct will be set.
*/
vdev_indirect_read_all(zio);
zio_vdev_io_redone(zio);
}
vdev_ops_t vdev_indirect_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_indirect_open,
.vdev_op_close = vdev_indirect_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_indirect_io_start,
.vdev_op_io_done = vdev_indirect_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = vdev_indirect_remap,
.vdev_op_xlate = NULL,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_INDIRECT, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* leaf vdev */
};
EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
ZMOD_RW,
"Minimum obsolete percent of bytes in the mapping "
"to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, U64, ZMOD_RW,
"Don't bother condensing if the mapping uses less than this amount of "
"memory");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, U64,
ZMOD_RW,
"Minimum size obsolete spacemap to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
UINT, ZMOD_RW,
"Used by tests to ensure certain actions happen in the middle of a "
"condense. A maximum value of 1 should be sufficient.");
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
UINT, ZMOD_RW,
"Maximum number of combinations when reconstructing split segments");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_initialize.c b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
index 0a7323f58df2..f6e2662bd40f 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_initialize.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
@@ -1,832 +1,833 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2024 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>
/*
* Value that is written to disk during initialization.
*/
static uint64_t zfs_initialize_value = 0xdeadbeefdeadbeeeULL;
/* maximum number of I/Os outstanding per leaf vdev */
static const int zfs_initialize_limit = 1;
/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
static uint64_t zfs_initialize_chunk_size = 1024 * 1024;
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing ||
vd->vdev_top->vdev_rz_expanding);
}
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
/*
* We pass in the guid instead of the vdev_t since the vdev may
* have been freed prior to the sync task being processed. This
* happens when a vdev is detached as we call spa_config_vdev_exit(),
* stop the initializing thread, schedule the sync task, and free
* the vdev. Later when the scheduled sync task is invoked, it would
* find that the vdev has been freed.
*/
uint64_t guid = *(uint64_t *)arg;
uint64_t txg = dmu_tx_get_txg(tx);
kmem_free(arg, sizeof (uint64_t));
vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing ||
!vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
return;
uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
vd->vdev_initialize_offset[txg & TXG_MASK] = 0;
VERIFY(vd->vdev_leaf_zap != 0);
objset_t *mos = vd->vdev_spa->spa_meta_objset;
if (last_offset > 0) {
vd->vdev_initialize_last_offset = last_offset;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
sizeof (last_offset), 1, &last_offset, tx));
}
if (vd->vdev_initialize_action_time > 0) {
uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
1, &val, tx));
}
uint64_t initialize_state = vd->vdev_initialize_state;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
&initialize_state, tx));
}
static void
vdev_initialize_zap_remove_sync(void *arg, dmu_tx_t *tx)
{
uint64_t guid = *(uint64_t *)arg;
kmem_free(arg, sizeof (uint64_t));
vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
return;
ASSERT3S(vd->vdev_initialize_state, ==, VDEV_INITIALIZE_NONE);
ASSERT3U(vd->vdev_leaf_zap, !=, 0);
vd->vdev_initialize_last_offset = 0;
vd->vdev_initialize_action_time = 0;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
int error;
error = zap_remove(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET, tx);
VERIFY(error == 0 || error == ENOENT);
error = zap_remove(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_STATE, tx);
VERIFY(error == 0 || error == ENOENT);
error = zap_remove(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, tx);
VERIFY(error == 0 || error == ENOENT);
}
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
spa_t *spa = vd->vdev_spa;
if (new_state == vd->vdev_initialize_state)
return;
/*
* Copy the vd's guid; it will be freed by the sync task.
*/
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/*
* If we're suspending, then preserve the original start time.
*/
if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
vd->vdev_initialize_action_time = gethrestime_sec();
}
vdev_initializing_state_t old_state = vd->vdev_initialize_state;
vd->vdev_initialize_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
if (new_state != VDEV_INITIALIZE_NONE) {
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_initialize_zap_update_sync, guid, tx);
} else {
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_initialize_zap_remove_sync, guid, tx);
}
switch (new_state) {
case VDEV_INITIALIZE_ACTIVE:
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s activated", vd->vdev_path);
break;
case VDEV_INITIALIZE_SUSPENDED:
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s suspended", vd->vdev_path);
break;
case VDEV_INITIALIZE_CANCELED:
if (old_state == VDEV_INITIALIZE_ACTIVE ||
old_state == VDEV_INITIALIZE_SUSPENDED)
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s canceled", vd->vdev_path);
break;
case VDEV_INITIALIZE_COMPLETE:
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s complete", vd->vdev_path);
break;
case VDEV_INITIALIZE_NONE:
spa_history_log_internal(spa, "uninitialize", tx,
"vdev=%s", vd->vdev_path);
break;
default:
panic("invalid state %llu", (unsigned long long)new_state);
}
dmu_tx_commit(tx);
if (new_state != VDEV_INITIALIZE_ACTIVE)
spa_notify_waiters(spa);
}
static void
vdev_initialize_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_initialize_io_lock);
if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
/*
* The I/O failed because the vdev was unavailable; roll the
* last offset back. (This works because spa_sync waits on
* spa_txg_zio before it runs sync tasks.)
*/
uint64_t *off =
&vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
*off = MIN(*off, zio->io_offset);
} else {
/*
* Since initializing is best-effort, we ignore I/O errors and
* rely on vdev_probe to determine if the errors are more
* critical.
*/
if (zio->io_error != 0)
vd->vdev_stat.vs_initialize_errors++;
vd->vdev_initialize_bytes_done += zio->io_orig_size;
}
ASSERT3U(vd->vdev_initialize_inflight, >, 0);
vd->vdev_initialize_inflight--;
cv_broadcast(&vd->vdev_initialize_io_cv);
mutex_exit(&vd->vdev_initialize_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
spa_t *spa = vd->vdev_spa;
/* Limit inflight initializing I/Os */
mutex_enter(&vd->vdev_initialize_io_lock);
while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
cv_wait(&vd->vdev_initialize_io_cv,
&vd->vdev_initialize_io_lock);
}
vd->vdev_initialize_inflight++;
mutex_exit(&vd->vdev_initialize_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
mutex_enter(&vd->vdev_initialize_lock);
if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/* This is the first write of this txg. */
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_initialize_zap_update_sync, guid, tx);
}
/*
* We know the vdev struct will still be around since all
* consumers of vdev_free must stop the initialization first.
*/
if (vdev_initialize_should_stop(vd)) {
mutex_enter(&vd->vdev_initialize_io_lock);
ASSERT3U(vd->vdev_initialize_inflight, >, 0);
vd->vdev_initialize_inflight--;
mutex_exit(&vd->vdev_initialize_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
mutex_exit(&vd->vdev_initialize_lock);
dmu_tx_commit(tx);
return (SET_ERROR(EINTR));
}
mutex_exit(&vd->vdev_initialize_lock);
vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
/* vdev_initialize_cb releases SCL_STATE_ALL */
dmu_tx_commit(tx);
return (0);
}
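/*
 * Illustrative sketch (hypothetical userland code, not ZFS code): the
 * inflight limit above is a standard mutex/condvar pattern -- wait while
 * the count is at the limit, bump it before issuing the I/O, and have the
 * completion callback decrement it and wake any waiters.  A pthreads
 * rendering of the same idea, with made-up names:
 */
#include <pthread.h>

typedef struct inflight_limit {
	pthread_mutex_t	il_lock;
	pthread_cond_t	il_cv;
	int		il_inflight;
	int		il_limit;
} inflight_limit_t;

void
inflight_enter(inflight_limit_t *il)	/* call before issuing an I/O */
{
	pthread_mutex_lock(&il->il_lock);
	while (il->il_inflight >= il->il_limit)
		pthread_cond_wait(&il->il_cv, &il->il_lock);
	il->il_inflight++;
	pthread_mutex_unlock(&il->il_lock);
}

void
inflight_exit(inflight_limit_t *il)	/* call from the completion callback */
{
	pthread_mutex_lock(&il->il_lock);
	il->il_inflight--;
	pthread_cond_broadcast(&il->il_cv);
	pthread_mutex_unlock(&il->il_lock);
}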
/*
* Callback to fill each ABD chunk with zfs_initialize_value. len must be
* divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
* allocation will guarantee these for us.
*/
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
(void) unused;
ASSERT0(len % sizeof (uint64_t));
for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
}
return (0);
}
static abd_t *
vdev_initialize_block_alloc(void)
{
/* Allocate ABD for filler data */
abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);
ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
vdev_initialize_block_fill, NULL);
return (data);
}
static void
vdev_initialize_block_free(abd_t *data)
{
abd_free(data);
}
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
- range_tree_t *rt = vd->vdev_initialize_tree;
+ zfs_range_tree_t *rt = vd->vdev_initialize_tree;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
- for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
+ for (zfs_range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
rs = zfs_btree_next(bt, &where, &where)) {
- uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
+ uint64_t size = zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt);
/* Split range into legally-sized physical chunks */
uint64_t writes_required =
((size - 1) / zfs_initialize_chunk_size) + 1;
for (uint64_t w = 0; w < writes_required; w++) {
int error;
error = vdev_initialize_write(vd,
- VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
+ VDEV_LABEL_START_SIZE + zfs_rs_get_start(rs, rt) +
(w * zfs_initialize_chunk_size),
MIN(size - (w * zfs_initialize_chunk_size),
zfs_initialize_chunk_size), data);
if (error != 0)
return (error);
}
}
return (0);
}
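/*
 * Illustrative sketch (hypothetical userland code, not ZFS code): the
 * writes_required computation above is ceiling division -- a segment of
 * `size` bytes needs ((size - 1) / chunk) + 1 writes, with the final write
 * trimmed to whatever remains.  Example values below are made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t chunk = 1024 * 1024;		/* 1 MiB, like the default */
	uint64_t size = (5 * chunk) / 2;	/* a 2.5 MiB segment */
	uint64_t writes_required = ((size - 1) / chunk) + 1;
	for (uint64_t w = 0; w < writes_required; w++) {
		uint64_t len = size - (w * chunk);
		if (len > chunk)
			len = chunk;
		printf("write %llu: %llu bytes\n",
		    (unsigned long long)w, (unsigned long long)len);
	}
	/* Prints three writes: 1 MiB, 1 MiB, 512 KiB. */
	return (0);
}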
static void
-vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
+vdev_initialize_xlate_last_rs_end(void *arg, zfs_range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
if (physical_rs->rs_end > *last_rs_end)
*last_rs_end = physical_rs->rs_end;
}
static void
-vdev_initialize_xlate_progress(void *arg, range_seg64_t *physical_rs)
+vdev_initialize_xlate_progress(void *arg, zfs_range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
vd->vdev_initialize_bytes_est += size;
if (vd->vdev_initialize_last_offset > physical_rs->rs_end) {
vd->vdev_initialize_bytes_done += size;
} else if (vd->vdev_initialize_last_offset > physical_rs->rs_start &&
vd->vdev_initialize_last_offset < physical_rs->rs_end) {
vd->vdev_initialize_bytes_done +=
vd->vdev_initialize_last_offset - physical_rs->rs_start;
}
}
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
vd->vdev_initialize_bytes_est = 0;
vd->vdev_initialize_bytes_done = 0;
for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
mutex_enter(&msp->ms_lock);
uint64_t ms_free = (msp->ms_size -
metaslab_allocated_space(msp)) /
vdev_get_ndisks(vd->vdev_top);
/*
* Convert the metaslab range to a physical range
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
- range_seg64_t logical_rs, physical_rs, remain_rs;
+ zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
/* Metaslab space after this offset has not been initialized */
vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
vd->vdev_initialize_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/* Metaslab space before this offset has been initialized */
uint64_t last_rs_end = physical_rs.rs_end;
if (!vdev_xlate_is_empty(&remain_rs)) {
vdev_xlate_walk(vd, &remain_rs,
vdev_initialize_xlate_last_rs_end, &last_rs_end);
}
if (vd->vdev_initialize_last_offset > last_rs_end) {
vd->vdev_initialize_bytes_done += ms_free;
vd->vdev_initialize_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If we get here, we're in the middle of initializing this
* metaslab. Load it and walk the free tree for more accurate
* progress estimation.
*/
VERIFY0(metaslab_load(msp));
zfs_btree_index_t where;
- range_tree_t *rt = msp->ms_allocatable;
- for (range_seg_t *rs =
+ zfs_range_tree_t *rt = msp->ms_allocatable;
+ for (zfs_range_seg_t *rs =
zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where,
&where)) {
- logical_rs.rs_start = rs_get_start(rs, rt);
- logical_rs.rs_end = rs_get_end(rs, rt);
+ logical_rs.rs_start = zfs_rs_get_start(rs, rt);
+ logical_rs.rs_end = zfs_rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs,
vdev_initialize_xlate_progress, vd);
}
mutex_exit(&msp->ms_lock);
}
}
static int
vdev_initialize_load(vdev_t *vd)
{
int err = 0;
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
sizeof (vd->vdev_initialize_last_offset), 1,
&vd->vdev_initialize_last_offset);
if (err == ENOENT) {
vd->vdev_initialize_last_offset = 0;
err = 0;
}
}
vdev_initialize_calculate_progress(vd);
return (err);
}
static void
-vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
+vdev_initialize_xlate_range_add(void *arg, zfs_range_seg64_t *physical_rs)
{
vdev_t *vd = arg;
/* Only add segments that we have not visited yet */
if (physical_rs->rs_end <= vd->vdev_initialize_last_offset)
return;
/* Pick up where we left off mid-range. */
if (vd->vdev_initialize_last_offset > physical_rs->rs_start) {
zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
"(%llu, %llu)", vd->vdev_path,
(u_longlong_t)physical_rs->rs_start,
(u_longlong_t)physical_rs->rs_end,
(u_longlong_t)vd->vdev_initialize_last_offset,
(u_longlong_t)physical_rs->rs_end);
ASSERT3U(physical_rs->rs_end, >,
vd->vdev_initialize_last_offset);
physical_rs->rs_start = vd->vdev_initialize_last_offset;
}
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
- range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
+ zfs_range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start);
}
/*
* Convert the logical range into a physical range and add it to our
* range tree.
*/
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
- range_seg64_t logical_rs;
+ zfs_range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_xlate_walk(vd, &logical_rs, vdev_initialize_xlate_range_add, arg);
}
static __attribute__((noreturn)) void
vdev_initialize_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
int error = 0;
uint64_t ms_count = 0;
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_initialize_last_offset = 0;
VERIFY0(vdev_initialize_load(vd));
abd_t *deadbeef = vdev_initialize_block_alloc();
- vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
- 0, 0);
+ vd->vdev_initialize_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
for (uint64_t i = 0; !vd->vdev_detached &&
i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
boolean_t unload_when_done = B_FALSE;
/*
* If we've expanded the top-level vdev or it's our
* first pass, calculate our progress.
*/
if (vd->vdev_top->vdev_ms_count != ms_count) {
vdev_initialize_calculate_progress(vd);
ms_count = vd->vdev_top->vdev_ms_count;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
if (!msp->ms_loaded && !msp->ms_loading)
unload_when_done = B_TRUE;
VERIFY0(metaslab_load(msp));
- range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
- vd);
+ zfs_range_tree_walk(msp->ms_allocatable,
+ vdev_initialize_range_add, vd);
mutex_exit(&msp->ms_lock);
error = vdev_initialize_ranges(vd, deadbeef);
metaslab_enable(msp, B_TRUE, unload_when_done);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
- range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
+ zfs_range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_enter(&vd->vdev_initialize_io_lock);
while (vd->vdev_initialize_inflight > 0) {
cv_wait(&vd->vdev_initialize_io_cv,
&vd->vdev_initialize_io_lock);
}
mutex_exit(&vd->vdev_initialize_io_lock);
- range_tree_destroy(vd->vdev_initialize_tree);
+ zfs_range_tree_destroy(vd->vdev_initialize_tree);
vdev_initialize_block_free(deadbeef);
vd->vdev_initialize_tree = NULL;
mutex_enter(&vd->vdev_initialize_lock);
if (!vd->vdev_initialize_exit_wanted) {
if (vdev_writeable(vd)) {
vdev_initialize_change_state(vd,
VDEV_INITIALIZE_COMPLETE);
} else if (vd->vdev_faulted) {
vdev_initialize_change_state(vd,
VDEV_INITIALIZE_CANCELED);
}
}
ASSERT(vd->vdev_initialize_thread != NULL ||
vd->vdev_initialize_inflight == 0);
/*
* Drop the vdev_initialize_lock while we sync out the
* txg since it's possible that a device might be trying to
* come online and must check to see if it needs to restart an
* initialization. That thread will be holding the spa_config_lock
* which would prevent the txg_wait_synced from completing.
*/
mutex_exit(&vd->vdev_initialize_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&vd->vdev_initialize_lock);
vd->vdev_initialize_thread = NULL;
cv_broadcast(&vd->vdev_initialize_cv);
mutex_exit(&vd->vdev_initialize_lock);
thread_exit();
}
/*
* Initiates initialization of a device. Caller must hold vdev_initialize_lock.
* Device must be a leaf and not already be initializing.
*/
void
vdev_initialize(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
ASSERT(!vd->vdev_top->vdev_rz_expanding);
vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
vd->vdev_initialize_thread = thread_create(NULL, 0,
vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
/*
* Uninitializes a device. Caller must hold vdev_initialize_lock.
* Device must be a leaf and not already be initializing.
*/
void
vdev_uninitialize(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_initialize_change_state(vd, VDEV_INITIALIZE_NONE);
}
/*
* Wait for the initialize thread to be terminated (cancelled or stopped).
*/
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
while (vd->vdev_initialize_thread != NULL)
cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
vd->vdev_initialize_exit_wanted = B_FALSE;
}
/*
* Wait for the vdev initialize threads on the provided list to cleanly exit.
*/
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
(void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
while ((vd = list_remove_head(vd_list)) != NULL) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop_wait_impl(vd);
mutex_exit(&vd->vdev_initialize_lock);
}
}
/*
* Stop initializing a device, with the resultant initializing state being
* tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
* a list_t is provided, the stopping vdev is inserted into the list. Callers
* are then required to call vdev_initialize_stop_wait() to block for all the
* initialization threads to exit. The caller must hold vdev_initialize_lock
* and must not be writing to the spa config, as the initializing thread may
* try to enter the config as a reader before exiting.
*/
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
list_t *vd_list)
{
ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
/*
* Allow cancel requests to proceed even if the initialize thread
* has stopped.
*/
if (vd->vdev_initialize_thread == NULL &&
tgt_state != VDEV_INITIALIZE_CANCELED) {
return;
}
vdev_initialize_change_state(vd, tgt_state);
vd->vdev_initialize_exit_wanted = B_TRUE;
if (vd_list == NULL) {
vdev_initialize_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
vd->vdev_spa->spa_export_thread == curthread);
list_insert_tail(vd_list, vd);
}
}
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
list_t *vd_list)
{
if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, tgt_state, vd_list);
mutex_exit(&vd->vdev_initialize_lock);
return;
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
vd_list);
}
}
/*
* Convenience function to stop initialization of a vdev tree and set all
* initialize thread pointers to NULL.
*/
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
spa_t *spa = vd->vdev_spa;
list_t vd_list;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
vdev_initialize_stop_wait(spa, &vd_list);
if (vd->vdev_spa->spa_sync_on) {
/* Make sure that our state has been synced to disk */
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
}
list_destroy(&vd_list);
}
void
vdev_initialize_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
vd->vdev_spa->spa_load_thread == curthread);
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_leaf_zap != 0) {
mutex_enter(&vd->vdev_initialize_lock);
uint64_t initialize_state = VDEV_INITIALIZE_NONE;
int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
sizeof (initialize_state), 1, &initialize_state);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_initialize_state = initialize_state;
uint64_t timestamp = 0;
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
sizeof (timestamp), 1, &timestamp);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_initialize_action_time = timestamp;
if ((vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_initialize_load(vd));
} else if (vd->vdev_initialize_state ==
VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
!vd->vdev_top->vdev_removing &&
!vd->vdev_top->vdev_rz_expanding &&
vd->vdev_initialize_thread == NULL) {
vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_initialize_restart(vd->vdev_child[i]);
}
}
EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_uninitialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);
ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, U64, ZMOD_RW,
"Value written during zpool initialize");
ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, U64, ZMOD_RW,
"Size in bytes of writes by zpool initialize");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_label.c b/sys/contrib/openzfs/module/zfs/vdev_label.c
index 9d12bc2eb0a2..2c4e0c1c4848 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_label.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_label.c
@@ -1,2166 +1,2167 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
/*
* Virtual Device Labels
* ---------------------
*
* The vdev label serves several distinct purposes:
*
* 1. Uniquely identify this device as part of a ZFS pool and confirm its
* identity within the pool.
*
* 2. Verify that all the devices given in a configuration are present
* within the pool.
*
* 3. Determine the uberblock for the pool.
*
* 4. In case of an import operation, determine the configuration of the
* toplevel vdev of which it is a part.
*
* 5. If an import operation cannot find all the devices in the pool,
* provide enough information to the administrator to determine which
* devices are missing.
*
* It is important to note that while the kernel is responsible for writing the
* label, it only consumes the information in the first three cases. The
* latter information is only consumed in userland when determining the
* configuration to import a pool.
*
*
* Label Organization
* ------------------
*
* Before describing the contents of the label, it's important to understand how
* the labels are written and updated with respect to the uberblock.
*
* When the pool configuration is altered, either because it was newly created
* or a device was added, we want to update all the labels such that we can deal
* with fatal failure at any point. To this end, each disk has two labels which
* are updated before and after the uberblock is synced. Assuming we have
* labels and an uberblock with the following transaction groups:
*
* L1 UB L2
* +------+ +------+ +------+
* | | | | | |
* | t10 | | t10 | | t10 |
* | | | | | |
* +------+ +------+ +------+
*
* In this stable state, the labels and the uberblock were all updated within
* the same transaction group (10). Each label is mirrored and checksummed, so
* that we can detect when we fail partway through writing the label.
*
* In order to identify which labels are valid, the labels are written in the
* following manner:
*
* 1. For each vdev, update 'L1' to the new label
* 2. Update the uberblock
* 3. For each vdev, update 'L2' to the new label
*
* Given arbitrary failure, we can determine the correct label to use based on
* the transaction group. If we fail after updating L1 but before updating the
* UB, we will notice that L1's transaction group is greater than the uberblock,
* so L2 must be valid. If we fail after writing the uberblock but before
* writing L2, we will notice that L2's transaction group is less than L1, and
* therefore L1 is valid.
*
* Another added complexity is that not every label is updated when the config
* is synced. If we add a single device, we do not want to have to re-write
* every label for every device in the pool. This means that both L1 and L2 may
* be older than the pool uberblock, because the necessary information is stored
* on another vdev.
*
*
* On-disk Format
* --------------
*
* The vdev label consists of two distinct parts, and is wrapped within the
* vdev_label_t structure. The label includes 8k of padding to permit legacy
* VTOC disk labels; this padding is otherwise ignored.
*
* The first half of the label is a packed nvlist which contains pool wide
* properties, per-vdev properties, and configuration information. It is
* described in more detail below.
*
* The latter half of the label consists of a redundant array of uberblocks.
* These uberblocks are updated whenever a transaction group is committed,
* or when the configuration is updated. When a pool is loaded, we scan each
* vdev for the 'best' uberblock.
*
*
* Configuration Information
* -------------------------
*
* The nvlist describing the pool and vdev contains the following elements:
*
* version ZFS on-disk version
* name Pool name
* state Pool state
* txg Transaction group in which this label was written
* pool_guid Unique identifier for this pool
* vdev_tree An nvlist describing vdev tree.
* features_for_read
* An nvlist of the features necessary for reading the MOS.
*
* Each leaf device label also contains the following:
*
* top_guid Unique ID for top-level vdev in which this is contained
* guid Unique ID for the leaf vdev
*
* The 'vs' configuration follows the format described in 'spa_config.c'.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/byteorder.h>
#include <sys/zfs_bootenv.h>
/*
* Basic routines to read and write from a vdev label.
* Used throughout the rest of this file.
*/
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
ASSERT(offset < sizeof (vdev_label_t));
ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
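/*
 * Illustrative sketch (hypothetical userland code, not ZFS code): the
 * arithmetic above places the first half of the labels at the front of the
 * device and the second half at the end.  The constants below stand in for
 * VDEV_LABELS and sizeof (vdev_label_t); the 1 GiB device size is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_LABELS	4
#define	EX_LABEL_SIZE	(256ULL * 1024)

static uint64_t
example_label_offset(uint64_t psize, int l, uint64_t offset)
{
	return (offset + l * EX_LABEL_SIZE + (l < EX_LABELS / 2 ?
	    0 : psize - EX_LABELS * EX_LABEL_SIZE));
}

int
main(void)
{
	uint64_t psize = 1024ULL * 1024 * 1024;
	for (int l = 0; l < EX_LABELS; l++)
		printf("label %d starts at %llu\n", l,
		    (unsigned long long)example_label_offset(psize, l, 0));
	/* Labels 0 and 1 sit at the front; 2 and 3 occupy the last 512 KiB. */
	return (0);
}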
/*
* Returns the vdev label number associated with the passed-in offset.
*/
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
int l;
if (offset >= psize - VDEV_LABEL_END_SIZE) {
offset -= psize - VDEV_LABEL_END_SIZE;
offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
}
l = offset / sizeof (vdev_label_t);
return (l < VDEV_LABELS ? l : -1);
}
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_read_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}
void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_write_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}
/*
* Generate the nvlist representing this vdev's stats
*/
void
vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
{
nvlist_t *nvx;
vdev_stat_t *vs;
vdev_stat_ex_t *vsx;
vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
vdev_get_stats_ex(vd, vs, vsx);
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
/*
* Add extended stats into a special extended stats nvlist. This keeps
* all the extended stats nicely grouped together. The extended stats
* nvlist is then added to the main nvlist.
*/
nvx = fnvlist_alloc();
/* ZIOs in flight to disk */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]);
/* ZIOs pending */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]);
/* Histograms */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD]));
/* Request sizes */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD]));
/* IO delays */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
/* Direct I/O write verify errors */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_DIO_VERIFY_ERRORS,
vs->vs_dio_verify_errors);
/* Add extended stats nvlist to main nvlist */
fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
fnvlist_free(nvx);
kmem_free(vs, sizeof (*vs));
kmem_free(vsx, sizeof (*vsx));
}
static void
root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
spa_t *spa = vd->vdev_spa;
if (vd != spa->spa_root_vdev)
return;
/* provide either current or previous scan information */
pool_scan_stat_t ps;
if (spa_scan_get_stats(spa, &ps) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
sizeof (pool_scan_stat_t) / sizeof (uint64_t));
}
pool_removal_stat_t prs;
if (spa_removal_get_stats(spa, &prs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
sizeof (prs) / sizeof (uint64_t));
}
pool_checkpoint_stat_t pcs;
if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
sizeof (pcs) / sizeof (uint64_t));
}
pool_raidz_expand_stat_t pres;
if (spa_raidz_expand_get_stats(spa, &pres) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t *)&pres,
sizeof (pres) / sizeof (uint64_t));
}
}
static void
top_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
if (vd == vd->vdev_top) {
vdev_rebuild_stat_t vrs;
if (vdev_rebuild_get_stats(vd, &vrs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t *)&vrs,
sizeof (vrs) / sizeof (uint64_t));
}
}
}
/*
* Generate the nvlist representing this vdev's config.
*/
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
vdev_config_flag_t flags)
{
nvlist_t *nv = NULL;
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
nv = fnvlist_alloc();
fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
if (vd->vdev_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
if (vd->vdev_devid != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
if (vd->vdev_physpath != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
if (vd->vdev_fru != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
if (vd->vdev_ops->vdev_op_config_generate != NULL)
vd->vdev_ops->vdev_op_config_generate(vd, nv);
if (vd->vdev_wholedisk != -1ULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
vd->vdev_wholedisk);
}
if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
if (vd->vdev_isspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
if (flags & VDEV_CONFIG_L2CACHE)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
vd == vd->vdev_top) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
vd->vdev_ms_array);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
vd->vdev_ms_shift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
if (vd->vdev_noalloc) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
vd->vdev_noalloc);
}
/*
* Slog devices are removed synchronously so don't
* persist the vdev_removing flag to the label.
*/
if (vd->vdev_removing && !vd->vdev_islog) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
vd->vdev_removing);
}
/* zpool command expects alloc class data */
if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
const char *bias = NULL;
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
bias = VDEV_ALLOC_BIAS_LOG;
break;
case VDEV_BIAS_SPECIAL:
bias = VDEV_ALLOC_BIAS_SPECIAL;
break;
case VDEV_BIAS_DEDUP:
bias = VDEV_ALLOC_BIAS_DEDUP;
break;
default:
ASSERT3U(vd->vdev_alloc_bias, ==,
VDEV_BIAS_NONE);
}
fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
bias);
}
}
if (vd->vdev_dtl_sm != NULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
space_map_object(vd->vdev_dtl_sm));
}
if (vic->vic_mapping_object != 0) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
vic->vic_mapping_object);
}
if (vic->vic_births_object != 0) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
vic->vic_births_object);
}
if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
vic->vic_prev_indirect_vdev);
}
if (vd->vdev_crtxg)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
if (vd->vdev_expansion_time)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_EXPANSION_TIME,
vd->vdev_expansion_time);
if (flags & VDEV_CONFIG_MOS) {
if (vd->vdev_leaf_zap != 0) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
vd->vdev_leaf_zap);
}
if (vd->vdev_top_zap != 0) {
ASSERT(vd == vd->vdev_top);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
vd->vdev_top_zap);
}
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap != 0 &&
spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
vd->vdev_root_zap);
}
if (vd->vdev_resilver_deferred) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(spa->spa_resilver_deferred);
fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
}
}
if (getstats) {
vdev_config_generate_stats(vd, nv);
root_vdev_actions_getprogress(vd, nv);
top_vdev_actions_getprogress(vd, nv);
/*
* Note: this can be called from open context
* (spa_get_stats()), so we need the rwlock to prevent
* the mapping from being changed by condensing.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
if (vd->vdev_indirect_mapping != NULL) {
ASSERT(vd->vdev_indirect_births != NULL);
vdev_indirect_mapping_t *vim =
vd->vdev_indirect_mapping;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
vdev_indirect_mapping_size(vim));
}
rw_exit(&vd->vdev_indirect_rwlock);
if (vd->vdev_mg != NULL &&
vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
/*
* Compute approximately how much memory would be used
* for the indirect mapping if this device were to
* be removed.
*
* Note: If the frag metric is invalid, then not
* enough metaslabs have been converted to have
* histograms.
*/
uint64_t seg_count = 0;
uint64_t to_alloc = vd->vdev_stat.vs_alloc;
/*
* There are the same number of allocated segments
* as free segments, so we will have at least one
* entry per free segment. However, small free
* segments (smaller than vdev_removal_max_span)
* will be combined with adjacent allocated segments
* as a single mapping.
*/
- for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE;
+ i++) {
if (i + 1 < highbit64(vdev_removal_max_span)
- 1) {
to_alloc +=
vd->vdev_mg->mg_histogram[i] <<
(i + 1);
} else {
seg_count +=
vd->vdev_mg->mg_histogram[i];
}
}
/*
* The maximum length of a mapping is
* zfs_remove_max_segment, so we need at least one entry
* per zfs_remove_max_segment of allocated data.
*/
seg_count += to_alloc / spa_remove_max_segment(spa);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
seg_count *
sizeof (vdev_indirect_mapping_entry_phys_t));
}
}
if (!vd->vdev_ops->vdev_op_leaf) {
nvlist_t **child;
uint64_t c;
ASSERT(!vd->vdev_ishole);
child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
for (c = 0; c < vd->vdev_children; c++) {
child[c] = vdev_config_generate(spa, vd->vdev_child[c],
getstats, flags);
}
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)child, vd->vdev_children);
for (c = 0; c < vd->vdev_children; c++)
nvlist_free(child[c]);
kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
} else {
const char *aux = NULL;
if (vd->vdev_offline && !vd->vdev_tmpoffline)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
if (vd->vdev_resilver_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
vd->vdev_resilver_txg);
if (vd->vdev_rebuild_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
vd->vdev_rebuild_txg);
if (vd->vdev_faulted)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
if (vd->vdev_degraded)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
if (vd->vdev_removed)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
if (vd->vdev_unspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
if (vd->vdev_ishole)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
/* Set the reason why we're FAULTED/DEGRADED. */
switch (vd->vdev_stat.vs_aux) {
case VDEV_AUX_ERR_EXCEEDED:
aux = "err_exceeded";
break;
case VDEV_AUX_EXTERNAL:
aux = "external";
break;
}
if (aux != NULL && !vd->vdev_tmpoffline) {
fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
} else {
/*
* We're healthy - clear any previous AUX_STATE values.
*/
if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
}
if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
vd->vdev_orig_guid);
}
}
return (nv);
}
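/*
 * Illustrative sketch (hypothetical helper, not part of this file): calling
 * vdev_config_generate() above without stats and reading back a couple of
 * the fields it packs. The flags value 0 requests a plain config with no
 * spare/l2cache or MOS-specific entries.
 */
#if 0	/* example only */
static void
example_dump_vdev_config(spa_t *spa, vdev_t *vd)
{
nvlist_t *nv = vdev_config_generate(spa, vd, B_FALSE, 0);
const char *type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
uint64_t guid = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID);
zfs_dbgmsg("vdev type %s guid %llu", type, (u_longlong_t)guid);
fnvlist_free(nv);
}
#endif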
/*
* Generate a view of the top-level vdevs. If we currently have holes
* in the namespace, then generate an array which contains a list of holey
* vdevs. Additionally, add the number of top-level children that currently
* exist.
*/
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *array;
uint_t c, idx;
array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_ishole) {
array[idx++] = c;
}
}
if (idx) {
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
array, idx) == 0);
}
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
rvd->vdev_children) == 0);
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
/*
* Returns the configuration from the label of the given vdev. For vdevs
* which don't have a txg value stored on their label (i.e. spares/cache)
* or have not been completely initialized (txg = 0) just return
* the configuration from the first valid label we find. Otherwise,
* find the most up-to-date label that does not exceed the specified
* 'txg' value.
*/
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *config = NULL;
vdev_phys_t *vp[VDEV_LABELS];
abd_t *vp_abd[VDEV_LABELS];
zio_t *zio[VDEV_LABELS];
uint64_t best_txg = 0;
uint64_t label_txg = 0;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(vd->vdev_validate_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (!vdev_readable(vd))
return (NULL);
/*
* The label for a dRAID distributed spare is not stored on disk.
* Instead it is generated when needed which allows us to bypass
* the pipeline when reading the config from the label.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (vdev_draid_read_config_spare(vd));
for (int l = 0; l < VDEV_LABELS; l++) {
vp_abd[l] = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
vp[l] = abd_to_buf(vp_abd[l]);
}
retry:
for (int l = 0; l < VDEV_LABELS; l++) {
zio[l] = zio_root(spa, NULL, NULL, flags);
vdev_label_read(zio[l], vd, l, vp_abd[l],
offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t),
NULL, NULL, flags);
}
for (int l = 0; l < VDEV_LABELS; l++) {
nvlist_t *label = NULL;
if (zio_wait(zio[l]) == 0 &&
nvlist_unpack(vp[l]->vp_nvlist, sizeof (vp[l]->vp_nvlist),
&label, 0) == 0) {
/*
* Auxiliary vdevs won't have txg values in their
* labels and newly added vdevs may not have been
* completely initialized so just return the
* configuration from the first valid label we
* encounter.
*/
error = nvlist_lookup_uint64(label,
ZPOOL_CONFIG_POOL_TXG, &label_txg);
if ((error || label_txg == 0) && !config) {
config = label;
for (l++; l < VDEV_LABELS; l++)
zio_wait(zio[l]);
break;
} else if (label_txg <= txg && label_txg > best_txg) {
best_txg = label_txg;
nvlist_free(config);
config = fnvlist_dup(label);
}
}
if (label != NULL) {
nvlist_free(label);
label = NULL;
}
}
if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
/*
* We found a valid label but it didn't pass txg restrictions.
*/
if (config == NULL && label_txg != 0) {
vdev_dbgmsg(vd, "label discarded as txg is too large "
"(%llu > %llu)", (u_longlong_t)label_txg,
(u_longlong_t)txg);
}
for (int l = 0; l < VDEV_LABELS; l++) {
abd_free(vp_abd[l]);
}
return (config);
}
/*
* Determine if a device is in use. The 'spare_guid' parameter will be filled
* in with the device guid if this spare is active elsewhere on the system.
*/
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
uint64_t *spare_guid, uint64_t *l2cache_guid)
{
spa_t *spa = vd->vdev_spa;
uint64_t state, pool_guid, device_guid, txg, spare_pool;
uint64_t vdtxg = 0;
nvlist_t *label;
if (spare_guid)
*spare_guid = 0ULL;
if (l2cache_guid)
*l2cache_guid = 0ULL;
/*
* Read the label, if any, and perform some basic sanity checks.
*/
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
return (B_FALSE);
(void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
&vdtxg);
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
&device_guid) != 0) {
nvlist_free(label);
return (B_FALSE);
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0)) {
nvlist_free(label);
return (B_FALSE);
}
nvlist_free(label);
/*
* Check to see if this device indeed belongs to the pool it claims to
* be a part of. The only way this is allowed is if the device is a hot
* spare (which we check for later on).
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
!spa_guid_exists(pool_guid, device_guid) &&
!spa_spare_exists(device_guid, NULL, NULL) &&
!spa_l2cache_exists(device_guid, NULL))
return (B_FALSE);
/*
* If the transaction group is zero, then this is an initialized (but
* unused) label. This is only an error if the create transaction
* on-disk is the same as the one we're using now, in which case the
* user has attempted to add the same vdev multiple times in the same
* transaction.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
txg == 0 && vdtxg == crtxg)
return (B_TRUE);
/*
* Check to see if this is a spare device. We do an explicit check for
* spa_has_spare() here because it may be on our pending list of spares
* to add.
*/
if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
spa_has_spare(spa, device_guid)) {
if (spare_guid)
*spare_guid = device_guid;
switch (reason) {
case VDEV_LABEL_CREATE:
return (B_TRUE);
case VDEV_LABEL_REPLACE:
return (!spa_has_spare(spa, device_guid) ||
spare_pool != 0ULL);
case VDEV_LABEL_SPARE:
return (spa_has_spare(spa, device_guid));
default:
break;
}
}
/*
* Check to see if this is an l2cache device.
*/
if (spa_l2cache_exists(device_guid, NULL) ||
spa_has_l2cache(spa, device_guid)) {
if (l2cache_guid)
*l2cache_guid = device_guid;
switch (reason) {
case VDEV_LABEL_CREATE:
return (B_TRUE);
case VDEV_LABEL_REPLACE:
return (!spa_has_l2cache(spa, device_guid));
case VDEV_LABEL_L2CACHE:
return (spa_has_l2cache(spa, device_guid));
default:
break;
}
}
/*
* We can't rely on a pool's state if it's been imported
* read-only. Instead we look to see if the pool is marked
* read-only in the namespace and set the state to active.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
spa_mode(spa) == SPA_MODE_READ)
state = POOL_STATE_ACTIVE;
/*
* If the device is marked ACTIVE, then this device is in use by another
* pool on the system.
*/
return (state == POOL_STATE_ACTIVE);
}
static nvlist_t *
vdev_aux_label_generate(vdev_t *vd, boolean_t reason_spare)
{
/*
* For inactive hot spares and level 2 ARC devices, we generate
* a special label that identifies the device as a mutually shared hot
* spare or l2cache device. We write the label in case of
* addition or removal of hot spare or l2cache vdev (in which
* case we want to revert the labels).
*/
nvlist_t *label = fnvlist_alloc();
fnvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
spa_version(vd->vdev_spa));
fnvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE, reason_spare ?
POOL_STATE_SPARE : POOL_STATE_L2CACHE);
fnvlist_add_uint64(label, ZPOOL_CONFIG_GUID, vd->vdev_guid);
/*
* This is merely to facilitate reporting the ashift of the
* cache device through zdb. The actual retrieval of the
* ashift (in vdev_alloc()) uses the nvlist
* spa->spa_l2cache->sav_config (populated in
* spa_ld_open_aux_vdevs()).
*/
if (!reason_spare)
fnvlist_add_uint64(label, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
/*
* Add path information to help find it during pool import
*/
if (vd->vdev_path != NULL)
fnvlist_add_string(label, ZPOOL_CONFIG_PATH, vd->vdev_path);
if (vd->vdev_devid != NULL)
fnvlist_add_string(label, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
if (vd->vdev_physpath != NULL) {
fnvlist_add_string(label, ZPOOL_CONFIG_PHYS_PATH,
vd->vdev_physpath);
}
return (label);
}
/*
* Initialize a vdev label. We check to make sure each leaf device is not in
* use, and writable. We put down an initial label which we will later
* overwrite with a complete label. Note that it's important to do this
* sequentially, not in parallel, so that we catch cases of multiple use of the
* same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
* itself.
*/
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
abd_t *bootenv;
uberblock_t *ub;
abd_t *ub_abd;
zio_t *zio;
char *buf;
size_t buflen;
int error;
uint64_t spare_guid = 0, l2cache_guid = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
boolean_t reason_spare = (reason == VDEV_LABEL_SPARE || (reason ==
VDEV_LABEL_REMOVE && vd->vdev_isspare));
boolean_t reason_l2cache = (reason == VDEV_LABEL_L2CACHE || (reason ==
VDEV_LABEL_REMOVE && vd->vdev_isl2cache));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
for (int c = 0; c < vd->vdev_children; c++)
if ((error = vdev_label_init(vd->vdev_child[c],
crtxg, reason)) != 0)
return (error);
/* Track the creation time for this vdev */
vd->vdev_crtxg = crtxg;
if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
return (0);
/*
* Dead vdevs cannot be initialized.
*/
if (vdev_is_dead(vd))
return (SET_ERROR(EIO));
/*
* Determine if the vdev is in use.
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
return (SET_ERROR(EBUSY));
/*
* If this is a request to add or replace a spare or l2cache device
* that is in use elsewhere on the system, then we must update the
* guid (which was initialized to a random value) to reflect the
* actual GUID (which is shared between multiple pools).
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
spare_guid != 0ULL) {
uint64_t guid_delta = spare_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fall through to the
* rest of the code. If we're adding a spare, then it's already
* labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_SPARE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE ||
reason == VDEV_LABEL_SPLIT);
}
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
l2cache_guid != 0ULL) {
uint64_t guid_delta = l2cache_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fall through to the
* rest of the code. If we're adding an l2cache, then it's
* already labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_L2CACHE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE);
}
/*
* Initialize its label.
*/
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
/*
* Generate a label describing the pool and our top-level vdev.
* We mark it as being from txg 0 to indicate that it's not
* really part of an active pool just yet. The labels will
* be written again with a meaningful txg by spa_sync().
*/
if (reason_spare || reason_l2cache) {
label = vdev_aux_label_generate(vd, reason_spare);
/*
* When spare or l2cache (aux) vdev is added during pool
* creation, spa->spa_uberblock is not written until this
* point. Write it on next config sync.
*/
if (uberblock_verify(&spa->spa_uberblock))
spa->spa_aux_sync_uber = B_TRUE;
} else {
uint64_t txg = 0ULL;
if (reason == VDEV_LABEL_SPLIT)
txg = spa->spa_uberblock.ub_txg;
label = spa_config_generate(spa, vd, txg, B_FALSE);
/*
* Add our creation time. This allows us to detect multiple
* vdev uses as described above, and automatically expires if we
* fail.
*/
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
crtxg) == 0);
}
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
if (error != 0) {
nvlist_free(label);
abd_free(vp_abd);
/* EFAULT means nvlist_pack ran out of room */
return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
}
/*
* Initialize uberblock template.
*/
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
abd_zero_off(ub_abd, sizeof (uberblock_t),
VDEV_UBERBLOCK_RING - sizeof (uberblock_t));
ub = abd_to_buf(ub_abd);
ub->ub_txg = 0;
/* Initialize the 2nd padding area. */
bootenv = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(bootenv, VDEV_PAD_SIZE);
/*
* Write everything in parallel.
*/
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t), NULL, NULL, flags);
/*
* Skip the 1st padding area.
* Zero out the 2nd padding area where it might have
* left over data from previous filesystem format.
*/
vdev_label_write(zio, vd, l, bootenv,
offsetof(vdev_label_t, vl_be),
VDEV_PAD_SIZE, NULL, NULL, flags);
vdev_label_write(zio, vd, l, ub_abd,
offsetof(vdev_label_t, vl_uberblock),
VDEV_UBERBLOCK_RING, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
nvlist_free(label);
abd_free(bootenv);
abd_free(ub_abd);
abd_free(vp_abd);
/*
* If this vdev hasn't been previously identified as a spare, then we
* mark it as such only if a) we are labeling it as a spare, or b) it
* exists as a spare elsewhere in the system. Do the same for
* level 2 ARC devices.
*/
if (error == 0 && !vd->vdev_isspare &&
(reason == VDEV_LABEL_SPARE ||
spa_spare_exists(vd->vdev_guid, NULL, NULL)))
spa_spare_add(vd);
if (error == 0 && !vd->vdev_isl2cache &&
(reason == VDEV_LABEL_L2CACHE ||
spa_l2cache_exists(vd->vdev_guid, NULL)))
spa_l2cache_add(vd);
return (error);
}
/*
* Done callback for vdev_label_read_bootenv_impl. If this is the first
* callback to finish, store our abd in the callback pointer. Otherwise, we
* just free our abd and return.
*/
static void
vdev_label_read_bootenv_done(zio_t *zio)
{
zio_t *rio = zio->io_private;
abd_t **cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE);
if (zio->io_error == 0) {
mutex_enter(&rio->io_lock);
if (*cbp == NULL) {
/* Will free this buffer in vdev_label_read_bootenv. */
*cbp = zio->io_abd;
} else {
abd_free(zio->io_abd);
}
mutex_exit(&rio->io_lock);
} else {
abd_free(zio->io_abd);
}
}
static void
vdev_label_read_bootenv_impl(zio_t *zio, vdev_t *vd, int flags)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags);
/*
* We just use the first label that has a correct checksum; the
* bootloader should have rewritten them all to be the same on boot,
* and any changes we made since boot have been the same across all
* labels.
*/
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_PAD_SIZE, B_FALSE),
offsetof(vdev_label_t, vl_be), VDEV_PAD_SIZE,
vdev_label_read_bootenv_done, zio, flags);
}
}
}
int
vdev_label_read_bootenv(vdev_t *rvd, nvlist_t *bootenv)
{
nvlist_t *config;
spa_t *spa = rvd->vdev_spa;
abd_t *abd = NULL;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(bootenv);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
zio_t *zio = zio_root(spa, NULL, &abd, flags);
vdev_label_read_bootenv_impl(zio, rvd, flags);
int err = zio_wait(zio);
if (abd != NULL) {
char *buf;
vdev_boot_envblock_t *vbe = abd_to_buf(abd);
vbe->vbe_version = ntohll(vbe->vbe_version);
switch (vbe->vbe_version) {
case VB_RAW:
/*
* if we have textual data in vbe_bootenv, create nvlist
* with key "envmap".
*/
fnvlist_add_uint64(bootenv, BOOTENV_VERSION, VB_RAW);
vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0';
fnvlist_add_string(bootenv, GRUB_ENVMAP,
vbe->vbe_bootenv);
break;
case VB_NVLIST:
err = nvlist_unpack(vbe->vbe_bootenv,
sizeof (vbe->vbe_bootenv), &config, 0);
if (err == 0) {
fnvlist_merge(bootenv, config);
nvlist_free(config);
break;
}
zfs_fallthrough;
default:
/* Check for FreeBSD zfs bootonce command string */
buf = abd_to_buf(abd);
if (*buf == '\0') {
fnvlist_add_uint64(bootenv, BOOTENV_VERSION,
VB_NVLIST);
break;
}
fnvlist_add_string(bootenv, FREEBSD_BOOTONCE, buf);
}
/*
* abd was allocated in vdev_label_read_bootenv_impl()
*/
abd_free(abd);
/*
* If we managed to read any successfully,
* return success.
*/
return (0);
}
return (err);
}
int
vdev_label_write_bootenv(vdev_t *vd, nvlist_t *env)
{
zio_t *zio;
spa_t *spa = vd->vdev_spa;
vdev_boot_envblock_t *bootenv;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
int error;
size_t nvsize;
char *nvbuf;
const char *tmp;
error = nvlist_size(env, &nvsize, NV_ENCODE_XDR);
if (error != 0)
return (SET_ERROR(error));
if (nvsize >= sizeof (bootenv->vbe_bootenv)) {
return (SET_ERROR(E2BIG));
}
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
error = ENXIO;
for (int c = 0; c < vd->vdev_children; c++) {
int child_err;
child_err = vdev_label_write_bootenv(vd->vdev_child[c], env);
/*
* As long as any of the disks managed to write all of their
* labels successfully, return success.
*/
if (child_err == 0)
error = child_err;
}
if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) ||
!vdev_writeable(vd)) {
return (error);
}
ASSERT3U(sizeof (*bootenv), ==, VDEV_PAD_SIZE);
abd_t *abd = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(abd, VDEV_PAD_SIZE);
bootenv = abd_borrow_buf_copy(abd, VDEV_PAD_SIZE);
nvbuf = bootenv->vbe_bootenv;
nvsize = sizeof (bootenv->vbe_bootenv);
bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION);
switch (bootenv->vbe_version) {
case VB_RAW:
if (nvlist_lookup_string(env, GRUB_ENVMAP, &tmp) == 0) {
(void) strlcpy(bootenv->vbe_bootenv, tmp, nvsize);
}
error = 0;
break;
case VB_NVLIST:
error = nvlist_pack(env, &nvbuf, &nvsize, NV_ENCODE_XDR,
KM_SLEEP);
break;
default:
error = EINVAL;
break;
}
if (error == 0) {
bootenv->vbe_version = htonll(bootenv->vbe_version);
abd_return_buf_copy(abd, bootenv, VDEV_PAD_SIZE);
} else {
abd_free(abd);
return (SET_ERROR(error));
}
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, abd,
offsetof(vdev_label_t, vl_be),
VDEV_PAD_SIZE, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
abd_free(abd);
return (error);
}
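/*
 * Illustrative sketch (hypothetical caller, not part of this file): building
 * a VB_RAW boot environment nvlist and handing it to
 * vdev_label_write_bootenv() above. The "kernelname=..." payload is an
 * arbitrary example string.
 */
#if 0	/* example only */
static int
example_set_bootenv(vdev_t *rvd)
{
nvlist_t *env = fnvlist_alloc();
int err;
fnvlist_add_uint64(env, BOOTENV_VERSION, VB_RAW);
fnvlist_add_string(env, GRUB_ENVMAP, "kernelname=kernel.old\n");
err = vdev_label_write_bootenv(rvd, env);
fnvlist_free(env);
return (err);
}
#endif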
/*
* ==========================================================================
* uberblock load/sync
* ==========================================================================
*/
/*
* Consider the following situation: txg is safely synced to disk. We've
* written the first uberblock for txg + 1, and then we lose power. When we
* come back up, we fail to see the uberblock for txg + 1 because, say,
* it was on a mirrored device and the replica to which we wrote txg + 1
* is now offline. If we then make some changes and sync txg + 1, and then
* the missing replica comes back, then for a few seconds we'll have two
* conflicting uberblocks on disk with the same txg. The solution is simple:
* among uberblocks with equal txg, choose the one with the latest timestamp.
*/
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
if (likely(cmp))
return (cmp);
/*
* If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
* ZFS, e.g. OpenZFS >= 0.7.
*
* If one ub has MMP and the other does not, they were written by
* different hosts, which matters for MMP. So we treat no MMP/no SEQ as
* a 0 value.
*
* Since timestamp and txg are the same if we get this far, either is
* acceptable for importing the pool.
*/
unsigned int seq1 = 0;
unsigned int seq2 = 0;
if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
seq1 = MMP_SEQ(ub1);
if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
seq2 = MMP_SEQ(ub2);
return (TREE_CMP(seq1, seq2));
}
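/*
 * Illustrative example of the ordering above (not additional behavior): for
 * two uberblocks with the same txg, the one with timestamp 1001 beats one
 * with timestamp 1000; if the timestamps also match, the higher MMP sequence
 * number wins, and an uberblock without valid MMP/SEQ information is treated
 * as sequence 0.
 */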
struct ubl_cbdata {
uberblock_t ubl_latest; /* Most recent uberblock */
uberblock_t *ubl_ubbest; /* Best uberblock (w/r/t max_txg) */
vdev_t *ubl_vd; /* vdev associated with the above */
};
static void
vdev_uberblock_load_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
spa_t *spa = zio->io_spa;
zio_t *rio = zio->io_private;
uberblock_t *ub = abd_to_buf(zio->io_abd);
struct ubl_cbdata *cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
mutex_enter(&rio->io_lock);
if (vdev_uberblock_compare(ub, &cbp->ubl_latest) > 0) {
cbp->ubl_latest = *ub;
}
if (ub->ub_txg <= spa->spa_load_max_txg &&
vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
/*
* Keep track of the vdev in which this uberblock
* was found. We will use this information later
* to obtain the config nvlist associated with
* this uberblock.
*/
*cbp->ubl_ubbest = *ub;
cbp->ubl_vd = vd;
}
mutex_exit(&rio->io_lock);
}
abd_free(zio->io_abd);
}
static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
struct ubl_cbdata *cbp)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd) &&
vd->vdev_ops != &vdev_draid_spare_ops) {
for (int l = 0; l < VDEV_LABELS; l++) {
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_load_done, zio, flags);
}
}
}
}
/*
* Reads the 'best' uberblock from disk along with its associated
* configuration. First, we read the uberblock array of each label of each
* vdev, keeping track of the uberblock with the highest txg in each array.
* Then, we read the configuration from the same vdev as the best uberblock.
*/
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
zio_t *zio;
spa_t *spa = rvd->vdev_spa;
struct ubl_cbdata cb;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(ub);
ASSERT(config);
memset(ub, 0, sizeof (uberblock_t));
memset(&cb, 0, sizeof (cb));
*config = NULL;
cb.ubl_ubbest = ub;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
zio = zio_root(spa, NULL, &cb, flags);
vdev_uberblock_load_impl(zio, rvd, flags, &cb);
(void) zio_wait(zio);
/*
* It's possible that the best uberblock was discovered on a label
* that has a configuration which was written in a future txg.
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
if (cb.ubl_vd != NULL) {
vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
"txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
if (ub->ub_raidz_reflow_info !=
cb.ubl_latest.ub_raidz_reflow_info) {
vdev_dbgmsg(cb.ubl_vd,
"spa=%s best uberblock (txg=%llu info=0x%llx) "
"has different raidz_reflow_info than latest "
"uberblock (txg=%llu info=0x%llx)",
spa->spa_name,
(u_longlong_t)ub->ub_txg,
(u_longlong_t)ub->ub_raidz_reflow_info,
(u_longlong_t)cb.ubl_latest.ub_txg,
(u_longlong_t)cb.ubl_latest.ub_raidz_reflow_info);
memset(ub, 0, sizeof (uberblock_t));
spa_config_exit(spa, SCL_ALL, FTAG);
return;
}
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
if (*config == NULL && spa->spa_extreme_rewind) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
"Trying again without txg restrictions.");
*config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
}
if (*config == NULL) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* For use when a leaf vdev is expanded.
* The location of labels 2 and 3 changed, and at the new location the
* uberblock rings are either empty or contain garbage. The sync will write
* new configs there because the vdev is dirty, but expansion also needs the
* uberblock rings copied. Read them from label 0 which did not move.
*
* Since the point is to populate labels {2,3} with valid uberblocks,
* we zero uberblocks we fail to read or which are not valid.
*/
static void
vdev_copy_uberblocks(vdev_t *vd)
{
abd_t *ub_abd;
zio_t *write_zio;
int locks = (SCL_L2ARC | SCL_ZIO);
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* No uberblocks are stored on distributed spares, they may be
* safely skipped when expanding a leaf vdev.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
const int src_label = 0;
zio_t *zio;
zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
vdev_label_read(zio, vd, src_label, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
NULL, NULL, flags);
if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
for (int l = 2; l < VDEV_LABELS; l++)
vdev_label_write(write_zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
(void) zio_wait(write_zio);
spa_config_exit(vd->vdev_spa, locks, FTAG);
abd_free(ub_abd);
}
/*
* On success, increment root zio's count of good writes.
* We only get credit for writes to known-visible vdevs; see spa_vdev_add().
*/
static void
vdev_uberblock_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
atomic_inc_64(good_writes);
}
/*
* Write the uberblock to all labels of all leaves of the specified vdev.
*/
static void
vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
uberblock_t *ub, vdev_t *vd, int flags)
{
for (uint64_t c = 0; c < vd->vdev_children; c++) {
vdev_uberblock_sync(zio, good_writes,
ub, vd->vdev_child[c], flags);
}
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* There's no need to write uberblocks to a distributed spare, they
* are already stored on all the leaves of the parent dRAID. For
* this same reason vdev_uberblock_load_impl() skips distributed
* spares when reading uberblocks.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
/* If the vdev was expanded, need to copy uberblock rings. */
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
vd->vdev_copy_uberblocks == B_TRUE) {
vdev_copy_uberblocks(vd);
vd->vdev_copy_uberblocks = B_FALSE;
}
/*
* We chose a slot based on the txg. If this uberblock has a special
* RAIDZ expansion state, then it is essentially an update of the
* current uberblock (it has the same txg). However, the current
* state is committed, so we want to write it to a different slot. If
* we overwrote the same slot, and we lose power during the uberblock
* write, and the disk does not do single-sector overwrites
* atomically (even though it is required to - i.e. we should see
* either the old or the new uberblock), then we could lose this
* txg's uberblock. Rewinding to the previous txg's uberblock may not
* be possible because RAIDZ expansion may have already overwritten
* some of the data, so we need the progress indicator in the
* uberblock.
*/
int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
int n = (ub->ub_txg - (RRSS_GET_STATE(ub) == RRSS_SCRATCH_VALID)) %
(VDEV_UBERBLOCK_COUNT(vd) - m);
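/*
 * Worked example (illustrative numbers): assuming VDEV_UBERBLOCK_COUNT(vd)
 * is 32 and multihost is off (m = 0), a normal sync of txg 1000 lands in
 * slot 1000 % 32 = 8, while a scratch-valid RAIDZ-expansion update of the
 * same txg lands in slot 999 % 32 = 7, so the committed copy is never
 * overwritten in place.
 */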
/* Copy the uberblock_t into the ABD */
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
abd_zero_off(ub_abd, sizeof (uberblock_t),
VDEV_UBERBLOCK_SIZE(vd) - sizeof (uberblock_t));
for (int l = 0; l < VDEV_LABELS; l++)
vdev_label_write(zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_sync_done, good_writes,
flags | ZIO_FLAG_DONT_PROPAGATE);
abd_free(ub_abd);
}
/* Sync the uberblocks to all vdevs in svd[] */
int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
spa_t *spa = svd[0]->vdev_spa;
zio_t *zio;
uint64_t good_writes = 0;
zio = zio_root(spa, NULL, NULL, flags);
for (int v = 0; v < svdcount; v++)
vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);
if (spa->spa_aux_sync_uber) {
for (int v = 0; v < spa->spa_spares.sav_count; v++) {
vdev_uberblock_sync(zio, &good_writes, ub,
spa->spa_spares.sav_vdevs[v], flags);
}
for (int v = 0; v < spa->spa_l2cache.sav_count; v++) {
vdev_uberblock_sync(zio, &good_writes, ub,
spa->spa_l2cache.sav_vdevs[v], flags);
}
}
(void) zio_wait(zio);
/*
* Flush the uberblocks to disk. This ensures that the odd labels
* are no longer needed (because the new uberblocks and the even
* labels are safely on disk), so it is safe to overwrite them.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (int v = 0; v < svdcount; v++) {
if (vdev_writeable(svd[v])) {
zio_flush(zio, svd[v]);
}
}
if (spa->spa_aux_sync_uber) {
spa->spa_aux_sync_uber = B_FALSE;
for (int v = 0; v < spa->spa_spares.sav_count; v++) {
if (vdev_writeable(spa->spa_spares.sav_vdevs[v])) {
zio_flush(zio, spa->spa_spares.sav_vdevs[v]);
}
}
for (int v = 0; v < spa->spa_l2cache.sav_count; v++) {
if (vdev_writeable(spa->spa_l2cache.sav_vdevs[v])) {
zio_flush(zio, spa->spa_l2cache.sav_vdevs[v]);
}
}
}
(void) zio_wait(zio);
return (good_writes >= 1 ? 0 : EIO);
}
/*
* On success, increment the count of good writes for our top-level vdev.
*/
static void
vdev_label_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0)
atomic_inc_64(good_writes);
}
/*
* If there weren't enough good writes, indicate failure to the parent.
*/
static void
vdev_label_sync_top_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (*good_writes == 0)
zio->io_error = SET_ERROR(EIO);
kmem_free(good_writes, sizeof (uint64_t));
}
/*
* We ignore errors for log and cache devices, simply free the private data.
*/
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
kmem_free(zio->io_private, sizeof (uint64_t));
}
/*
* Write all even or odd labels to all leaves of the specified vdev.
*/
static void
vdev_label_sync(zio_t *zio, uint64_t *good_writes,
vdev_t *vd, int l, uint64_t txg, int flags)
{
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
char *buf;
size_t buflen;
vdev_t *pvd = vd->vdev_parent;
boolean_t spare_in_use = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_label_sync(zio, good_writes,
vd->vdev_child[c], l, txg, flags);
}
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* The top-level config never needs to be written to a distributed
* spare. When read, vdev_dspare_label_read_config() will generate
* the config for vdev_label_read_config().
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
if (pvd && pvd->vdev_ops == &vdev_spare_ops)
spare_in_use = B_TRUE;
/*
* Generate a label describing the top-level config to which we belong.
*/
if ((vd->vdev_isspare && !spare_in_use) || vd->vdev_isl2cache) {
label = vdev_aux_label_generate(vd, vd->vdev_isspare);
} else {
label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
}
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
for (; l < VDEV_LABELS; l += 2) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t),
vdev_label_sync_done, good_writes,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
}
abd_free(vp_abd);
nvlist_free(label);
}
static int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
list_t *dl = &spa->spa_config_dirty_list;
vdev_t *vd;
zio_t *zio;
int error;
/*
* Write the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
uint64_t *good_writes;
ASSERT(!vd->vdev_ishole);
good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
zio_t *vio = zio_null(zio, spa, NULL,
(vd->vdev_islog || vd->vdev_aux != NULL) ?
vdev_label_sync_ignore_done : vdev_label_sync_top_done,
good_writes, flags);
vdev_label_sync(vio, good_writes, vd, l, txg, flags);
zio_nowait(vio);
}
/*
* AUX path may have changed during import
*/
spa_aux_vdev_t *sav[2] = {&spa->spa_spares, &spa->spa_l2cache};
for (int i = 0; i < 2; i++) {
for (int v = 0; v < sav[i]->sav_count; v++) {
uint64_t *good_writes;
if (!sav[i]->sav_label_sync)
continue;
good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
zio_t *vio = zio_null(zio, spa, NULL,
vdev_label_sync_ignore_done, good_writes, flags);
vdev_label_sync(vio, good_writes, sav[i]->sav_vdevs[v],
l, txg, flags);
zio_nowait(vio);
}
}
error = zio_wait(zio);
/*
* Flush the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
zio_flush(zio, vd);
for (int i = 0; i < 2; i++) {
if (!sav[i]->sav_label_sync)
continue;
for (int v = 0; v < sav[i]->sav_count; v++)
zio_flush(zio, sav[i]->sav_vdevs[v]);
if (l == 1)
sav[i]->sav_label_sync = B_FALSE;
}
(void) zio_wait(zio);
return (error);
}
/*
* Sync the uberblock and any changes to the vdev configuration.
*
* The order of operations is carefully crafted to ensure that
* if the system panics or loses power at any time, the state on disk
* is still transactionally consistent. The in-line comments below
* describe the failure semantics at each stage.
*
* Moreover, vdev_config_sync() is designed to be idempotent: if it fails
* at any time, you can just call it again, and it will resume its work.
*/
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
spa_t *spa = svd[0]->vdev_spa;
uberblock_t *ub = &spa->spa_uberblock;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
ASSERT(svdcount != 0);
retry:
/*
* Normally, we don't want to try too hard to write every label and
* uberblock. If there is a flaky disk, we don't want the rest of the
* sync process to block while we retry. But if we can't write a
* single label out, we should retry with ZIO_FLAG_TRYHARD before
* bailing out and declaring the pool faulted.
*/
if (error != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0)
return (error);
flags |= ZIO_FLAG_TRYHARD;
}
ASSERT(ub->ub_txg <= txg);
/*
* If this isn't a resync due to I/O errors,
* and nothing changed in this transaction group,
* and multihost protection isn't enabled,
* and the vdev configuration hasn't changed,
* then there's nothing to do.
*/
if (ub->ub_txg < txg) {
boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
txg, spa->spa_mmp.mmp_delay);
if (!changed && list_is_empty(&spa->spa_config_dirty_list) &&
!spa_multihost(spa))
return (0);
}
if (txg > spa_freeze_txg(spa))
return (0);
ASSERT(txg <= spa->spa_final_txg);
/*
* Flush the write cache of every disk that's been written to
* in this transaction group. This ensures that all blocks
* written in this txg will be committed to stable storage
* before any uberblock that references them.
*/
zio_t *zio = zio_root(spa, NULL, NULL, flags);
for (vdev_t *vd =
txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
zio_flush(zio, vd);
(void) zio_wait(zio);
/*
* Sync out the even labels (L0, L2) for every dirty vdev. If the
* system dies in the middle of this process, that's OK: all of the
* even labels that made it to disk will be newer than any uberblock,
* and will therefore be considered invalid. The odd labels (L1, L3),
* which have not yet been touched, will still be valid. We flush
* the new labels to disk to ensure that all even-label updates
* are committed to stable storage before the uberblock update.
*/
if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_label_sync_list() returned error %d "
"for pool '%s' when syncing out the even labels "
"of dirty vdevs", error, spa_name(spa));
}
goto retry;
}
/*
* Sync the uberblocks to all vdevs in svd[].
* If the system dies in the middle of this step, there are two cases
* to consider, and the on-disk state is consistent either way:
*
* (1) If none of the new uberblocks made it to disk, then the
* previous uberblock will be the newest, and the odd labels
* (which had not yet been touched) will be valid with respect
* to that uberblock.
*
* (2) If one or more new uberblocks made it to disk, then they
* will be the newest, and the even labels (which had all
* been successfully committed) will be valid with respect
* to the new uberblocks.
*/
if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
"%d for pool '%s'", error, spa_name(spa));
}
goto retry;
}
if (spa_multihost(spa))
mmp_update_uberblock(spa, ub);
/*
* Sync out odd labels for every dirty vdev. If the system dies
* in the middle of this process, the even labels and the new
* uberblocks will suffice to open the pool. The next time
* the pool is opened, the first thing we'll do -- before any
* user data is modified -- is mark every vdev dirty so that
* all labels will be brought up to date. We flush the new labels
* to disk to ensure that all odd-label updates are committed to
* stable storage before the next transaction group begins.
*/
if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_label_sync_list() returned error %d "
"for pool '%s' when syncing out the odd labels of "
"dirty vdevs", error, spa_name(spa));
}
goto retry;
}
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index 6103f780e6bc..59225e766ba1 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -1,5122 +1,5123 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_tx.h>
#include <sys/abd.h>
#include <sys/zfs_rlock.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/dsl_scan.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_raidz_io_verify() */
#endif
/*
* Virtual device vector for RAID-Z.
*
* This vdev supports single, double, and triple parity. For single parity,
* we use a simple XOR of all the data columns. For double or triple parity,
* we use a special case of Reed-Solomon coding. This extends the
* technique described in "The mathematics of RAID-6" by H. Peter Anvin by
* drawing on the system described in "A Tutorial on Reed-Solomon Coding for
* Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
* former is also based. The latter is designed to provide higher performance
* for writes.
*
* Note that the Plank paper claimed to support arbitrary N+M, but was then
* amended six years later identifying a critical flaw that invalidates its
* claims. Nevertheless, the technique can be adapted to work for up to
* triple parity. For additional parity, the amendment "Note: Correction to
* the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
* is viable, but the additional complexity means that write performance will
* suffer.
*
* All of the methods above operate on a Galois field, defined over the
* integers mod 2^N. In our case we choose N=8 for GF(2^8) so that all elements
* can be expressed with a single byte. Briefly, the operations on the
* field are defined as follows:
*
* o addition (+) is represented by a bitwise XOR
* o subtraction (-) is therefore identical to addition: A + B = A - B
* o multiplication of A by 2 is defined by the following bitwise expression:
*
* (A * 2)_7 = A_6
* (A * 2)_6 = A_5
* (A * 2)_5 = A_4
* (A * 2)_4 = A_3 + A_7
* (A * 2)_3 = A_2 + A_7
* (A * 2)_2 = A_1 + A_7
* (A * 2)_1 = A_0
* (A * 2)_0 = A_7
*
* In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
* As an aside, this multiplication is derived from the error correcting
* primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
*
* Observe that any number in the field (except for 0) can be expressed as a
* power of 2 -- a generator for the field. We store a table of the powers of
* 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
* be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
* than field addition). The inverse of a field element A (A^-1) is therefore
* A ^ (255 - 1) = A^254.
*
* The up-to-three parity columns, P, Q, R over several data columns,
* D_0, ... D_n-1, can be expressed by field operations:
*
* P = D_0 + D_1 + ... + D_n-2 + D_n-1
* Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
* = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
* R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
* = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
*
* We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
* XOR operation, and 2 and 4 can be computed quickly and generate linearly-
* independent coefficients. (There are no additional coefficients that have
* this property which is why the uncorrected Plank method breaks down.)
*
* See the reconstruction code below for how P, Q and R can be used individually
* or in concert to recover missing data columns.
*/
#define VDEV_RAIDZ_P 0
#define VDEV_RAIDZ_Q 1
#define VDEV_RAIDZ_R 2
#define VDEV_RAIDZ_MUL_2(x) (((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define VDEV_RAIDZ_MUL_4(x) (VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
/*
* We provide a mechanism to perform the field multiplication operation on a
* 64-bit value all at once rather than a byte at a time. This works by
* creating a mask from the top bit in each byte and using that to
* conditionally apply the XOR of 0x1d.
*/
#define VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
(mask) = (x) & 0x8080808080808080ULL; \
(mask) = ((mask) << 1) - ((mask) >> 7); \
(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}
#define VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
VDEV_RAIDZ_64MUL_2((x), mask); \
VDEV_RAIDZ_64MUL_2((x), mask); \
}
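/*
 * Illustrative sketch (not part of the driver): computing the P and Q parity
 * bytes at a single offset across n data columns with the GF(2^8)
 * multiply-by-2 primitive above. The real code operates on whole columns
 * (64 bits at a time via VDEV_RAIDZ_64MUL_2); this only makes the Q
 * recurrence shown earlier concrete.
 */
#if 0	/* example only */
static void
example_raidz_pq_byte(const uint8_t *d, int n, uint8_t *p, uint8_t *q)
{
uint8_t pp = 0, qq = 0;
for (int i = 0; i < n; i++) {
pp ^= d[i]; /* P: plain XOR of all columns */
qq = VDEV_RAIDZ_MUL_2(qq); /* Q: multiply accumulator by 2 ... */
qq ^= d[i]; /* ... then add (XOR) the next column */
}
*p = pp;
*q = qq;
}
#endif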
/*
* Big Theory Statement for how a RAIDZ VDEV is expanded
*
* An existing RAIDZ VDEV can be expanded by attaching a new disk. Expansion
* works with all three RAIDZ parity choices, including RAIDZ1, 2, or 3. VDEVs
* that have been previously expanded can be expanded again.
*
* The RAIDZ VDEV must be healthy (must be able to write to all the drives in
* the VDEV) when an expansion starts. And the expansion will pause if any
* disk in the VDEV fails, and resume once the VDEV is healthy again. All other
* operations on the pool can continue while an expansion is in progress (e.g.
* read/write, snapshot, zpool add, etc.), except for zpool checkpoint, zpool
* trim, and zpool initialize, which can't be run during an expansion. Following a
* reboot or export/import, the expansion resumes where it left off.
*
* == Reflowing the Data ==
*
* The expansion involves reflowing (copying) the data from the current set
* of disks to spread it across the new set which now has one more disk. This
* reflow operation is similar to reflowing text when the column width of a
* text editor window is expanded. The text doesn’t change but the location of
* the text changes to accommodate the new width. An example reflow result for
* a 4-wide RAIDZ1 to a 5-wide is shown below.
*
* Reflow End State
* Each letter indicates a parity group (logical stripe)
*
* Before expansion After Expansion
* D1 D2 D3 D4 D1 D2 D3 D4 D5
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | A | A | A | A | | A | A | A | A | B |
* | 1| 2| 3| 4| | 1| 2| 3| 4| 5|
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | B | B | C | C | | B | C | C | C | C |
* | 5| 6| 7| 8| | 6| 7| 8| 9| 10|
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | C | C | D | D | | D | D | E | E | E |
* | 9| 10| 11| 12| | 11| 12| 13| 14| 15|
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | E | E | E | E | --> | E | F | F | G | G |
* | 13| 14| 15| 16| | 16| 17| 18|p 19| 20|
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | F | F | G | G | | G | G | H | H | H |
* | 17| 18| 19| 20| | 21| 22| 23| 24| 25|
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | G | G | H | H | | H | I | I | J | J |
* | 21| 22| 23| 24| | 26| 27| 28| 29| 30|
* +------+------+------+------+ +------+------+------+------+------+
* | | | | | | | | | | |
* | H | H | I | I | | J | J | | | K |
* | 25| 26| 27| 28| | 31| 32| 33| 34| 35|
* +------+------+------+------+ +------+------+------+------+------+
*
* This reflow approach has several advantages. There is no need to read or
* modify the block pointers or recompute any block checksums. The reflow
* doesn’t need to know where the parity sectors reside. We can read and write
* data sequentially and the copy can occur in a background thread in open
* context. The design also allows for fast discovery of what data to copy.
*
* The VDEV metaslabs are processed, one at a time, to copy the block data to
* have it flow across all the disks. The metaslab is disabled for allocations
* during the copy. As an optimization, we only copy the allocated data which
* can be determined by looking at the metaslab range tree. During the copy we
* must maintain the redundancy guarantees of the RAIDZ VDEV (i.e., we still
* need to be able to survive losing parity count disks). This means we
* cannot overwrite data during the reflow that would be needed if a disk is
* lost.
*
* After the reflow completes, all newly-written blocks will have the new
* layout, i.e., they will have the parity to data ratio implied by the new
* number of disks in the RAIDZ group. Even though the reflow copies all of
* the allocated space (data and parity), it is only rearranged, not changed.
*
* This act of reflowing the data has a few implications about blocks
* that were written before the reflow completes:
*
* - Old blocks will still use the same amount of space (i.e., they will have
* the parity to data ratio implied by the old number of disks in the RAIDZ
* group).
* - Reading old blocks will be slightly slower than before the reflow, for
* two reasons. First, we will have to read from all disks in the RAIDZ
* VDEV, rather than being able to skip the children that contain only
* parity of this block (because the data of a single block is now spread
* out across all the disks). Second, in most cases there will be an extra
* bcopy, needed to rearrange the data back to its original layout in memory.
*
* == Scratch Area ==
*
* As we copy the block data, we can only progress to the point that writes
* will not overlap with blocks whose progress has not yet been recorded on
* disk. Since partially-copied rows are always read from the old location,
* we need to stop one row before the sector-wise overlap, to prevent any
* row-wise overlap. For example, in the diagram above, when we reflow sector
* B6 it will overwrite the original location for B5.
*
* To get around this, a scratch space is used so that we can start copying
* without risking data loss by overlapping the row. As an added benefit, it
* improves performance at the beginning of the reflow, but that small perf
* boost wouldn't be worth the complexity on its own.
*
* Ideally we want to copy at least 2 * (new_width)^2 so that we have a
* separation of 2*(new_width+1) and a chunk size of new_width+2. With the max
* RAIDZ width of 255 and 4K sectors this would be 2MB per disk. In practice
* the widths will likely be single digits so we can get a substantial chunk
* size using only a few MB of scratch per disk.
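*
* For example (illustrative numbers only): expanding to a 5-wide RAIDZ with
* 4K sectors, 2 * 5^2 = 50 sectors of copied data (about 200 KiB in total,
* or roughly 40 KiB per disk) is already enough to clear the row overlap,
* far below the 3.5 MiB reserved boot area described below.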
*
* The scratch area is persisted to disk, and holds a large amount of reflowed
* state. It lets us always read the partially written stripes when a disk
* fails or the copy is interrupted (crash) during the initial copying phase,
* and it also lets us get past a small chunk size restriction. At a minimum,
* the scratch space must be large enough to get us to the point that one row
* does not overlap itself when moved (i.e., new_width^2). But going larger is
* even better. We
* use the 3.5 MiB reserved "boot" space that resides after the ZFS disk labels
* as our scratch space to handle overwriting the initial part of the VDEV.
*
* 0 256K 512K 4M
* +------+------+-----------------------+-----------------------------
* | VDEV | VDEV | Boot Block (3.5M) | Allocatable space ...
* | L0 | L1 | Reserved | (Metaslabs)
* +------+------+-----------------------+-------------------------------
* Scratch Area
*
* == Reflow Progress Updates ==
*
* After the initial scratch-based reflow, the expansion process works
* similarly to device removal. We create a new open context thread which
* reflows the data, and periodically kicks off sync tasks to update logical
* state. In this case, state is the committed progress (offset of next data
* to copy). We need to persist the completed offset on disk, so that if we
* crash we know which format each VDEV offset is in.
*
* == Time Dependent Geometry ==
*
* In non-expanded RAIDZ, blocks are read from disk in a column by column
* fashion. For a multi-row block, the second sector is in the first column
* not in the second column. This allows us to issue full reads for each
* column directly into the request buffer. The block data is thus laid out
* sequentially in a column-by-column fashion.
*
* For example, in the before expansion diagram above, one logical block might
* be sectors G19-H26. The parity is in G19,H23; and the data is in
* G20,H24,G21,H25,G22,H26.
*
* After a block is reflowed, the sectors that were all in the original column
* data can now reside in different columns. When reading from an expanded
* VDEV, we need to know the logical stripe width for each block so we can
* reconstitute the block’s data after the reads are completed. Likewise,
* when we perform the combinatorial reconstruction we need to know the
* original width so we can retry combinations from the past layouts.
*
* Time dependent geometry is what we call having blocks with different layouts
* (stripe widths) in the same VDEV. It uses the block’s birth time (plus the
* time the expansion ended) to establish the correct width for a given block.
* After an expansion completes, we record the time, so that blocks written
* with a particular width (geometry) can be identified.
*
* == On Disk Format Changes ==
*
* A new pool feature flag, 'raidz_expansion', is added; its reference count is
* the number of RAIDZ VDEVs that have been expanded.
*
* The blocks on expanded RAIDZ VDEV can have different logical stripe widths.
*
* Since the uberblock can point to arbitrary blocks, which might be on the
* expanding RAIDZ and might or might not have been expanded, we need to know
* which way a block is laid out before reading it. This info is the next
* offset that needs to be reflowed, and we persist it in the uberblock, in
* the new ub_raidz_reflow_info field, as opposed to the MOS or the vdev label.
* After the expansion is complete, we then use the raidz_expand_txgs array
* (see below) to determine how to read a block, and the ub_raidz_reflow_info
* field is no longer required.
*
* The uberblock's ub_raidz_reflow_info field also holds the scratch space
* state (i.e., active or not) which is also required before reading a block
* during the initial phase of reflowing the data.
*
* The top-level RAIDZ VDEV has two new entries in the nvlist:
*
* 'raidz_expand_txgs' array: logical stripe widths by txg are recorded here
* and used after the expansion is complete to
* determine how to read a raidz block
* 'raidz_expanding' boolean: present during reflow and removed after completion
* used during a spa import to resume an unfinished
* expansion
*
* And finally the VDEV's top zap adds the following informational entries:
* VDEV_TOP_ZAP_RAIDZ_EXPAND_STATE
* VDEV_TOP_ZAP_RAIDZ_EXPAND_START_TIME
* VDEV_TOP_ZAP_RAIDZ_EXPAND_END_TIME
* VDEV_TOP_ZAP_RAIDZ_EXPAND_BYTES_COPIED
*/
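As a rough illustration of the scratch sizing described above, the per-disk requirement is just 2 * new_width sectors, since 2 * new_width^2 sectors are spread across new_width disks. This is an editor's sketch under those assumptions, not part of the driver; the helper name below is hypothetical.
/*
 * Editor's sketch (illustrative only): per-disk scratch requirement implied
 * by the sizing comment above.
 */
static uint64_t
reflow_scratch_bytes_per_disk(uint64_t new_width, uint64_t ashift)
{
        /*
         * 2 * new_width^2 sectors spread over new_width disks is
         * 2 * new_width sectors per disk. For new_width = 255 and
         * ashift = 12 (4K sectors): 510 * 4096 ~= 2 MB per disk, which
         * fits comfortably in the 3.5 MiB reserved boot area.
         */
        return ((2 * new_width) << ashift);
}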
/*
* For testing only: pause the raidz expansion after reflowing this amount.
* (accessed by ZTS and ztest)
*/
#ifdef _KERNEL
static
#endif /* _KERNEL */
unsigned long raidz_expand_max_reflow_bytes = 0;
/*
* For testing only: pause the raidz expansion at a certain point.
*/
uint_t raidz_expand_pause_point = 0;
/*
* Maximum amount of copy I/O outstanding at once.
*/
#ifdef _ILP32
static unsigned long raidz_expand_max_copy_bytes = SPA_MAXBLOCKSIZE;
#else
static unsigned long raidz_expand_max_copy_bytes = 10 * SPA_MAXBLOCKSIZE;
#endif
/*
* Apply raidz map ABD aggregation if the number of rows in the map is equal
* to or greater than the value below.
*/
static unsigned long raidz_io_aggregate_rows = 4;
/*
* Automatically start a pool scrub when a RAIDZ expansion completes in
* order to verify the checksums of all blocks which have been copied
* during the expansion. Automatic scrubbing is enabled by default and
* is strongly recommended.
*/
static int zfs_scrub_after_expand = 1;
static void
vdev_raidz_row_free(raidz_row_t *rr)
{
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size != 0)
abd_free(rc->rc_abd);
if (rc->rc_orig_data != NULL)
abd_free(rc->rc_orig_data);
}
if (rr->rr_abd_empty != NULL)
abd_free(rr->rr_abd_empty);
kmem_free(rr, offsetof(raidz_row_t, rr_col[rr->rr_scols]));
}
void
vdev_raidz_map_free(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++)
vdev_raidz_row_free(rm->rm_row[i]);
if (rm->rm_nphys_cols) {
for (int i = 0; i < rm->rm_nphys_cols; i++) {
if (rm->rm_phys_col[i].rc_abd != NULL)
abd_free(rm->rm_phys_col[i].rc_abd);
}
kmem_free(rm->rm_phys_col, sizeof (raidz_col_t) *
rm->rm_nphys_cols);
}
ASSERT3P(rm->rm_lr, ==, NULL);
kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_map_free(rm);
}
static int
vdev_raidz_reflow_compare(const void *x1, const void *x2)
{
const reflow_node_t *l = x1;
const reflow_node_t *r = x2;
return (TREE_CMP(l->re_txg, r->re_txg));
}
const zio_vsd_ops_t vdev_raidz_vsd_ops = {
.vsd_free = vdev_raidz_map_free_vsd,
};
raidz_row_t *
vdev_raidz_row_alloc(int cols, zio_t *zio)
{
raidz_row_t *rr =
kmem_zalloc(offsetof(raidz_row_t, rr_col[cols]), KM_SLEEP);
rr->rr_cols = cols;
rr->rr_scols = cols;
for (int c = 0; c < cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_shadow_devidx = INT_MAX;
rc->rc_shadow_offset = UINT64_MAX;
/*
* We can not allow self healing to take place for Direct I/O
* reads. There is nothing that stops the buffer contents from
* being manipulated while the I/O is in flight. It is possible
* that the checksum could be verified on the buffer and then
* the contents of that buffer are manipulated afterwards. This
* could lead to bad data being written out during self
* healing.
*/
if (!(zio->io_flags & ZIO_FLAG_DIO_READ))
rc->rc_allow_repair = 1;
}
return (rr);
}
static void
vdev_raidz_map_alloc_write(zio_t *zio, raidz_map_t *rm, uint64_t ashift)
{
int c;
int nwrapped = 0;
uint64_t off = 0;
raidz_row_t *rr = rm->rm_row[0];
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(rm->rm_nrows, ==, 1);
/*
* Pad any parity columns with additional space to account for skip
* sectors.
*/
if (rm->rm_skipstart < rr->rr_firstdatacol) {
ASSERT0(rm->rm_skipstart);
nwrapped = rm->rm_nskip;
} else if (rr->rr_scols < (rm->rm_skipstart + rm->rm_nskip)) {
nwrapped =
(rm->rm_skipstart + rm->rm_nskip) % rr->rr_scols;
}
/*
* Optional single skip sectors (rc_size == 0) will be handled in
* vdev_raidz_io_start_write().
*/
int skipped = rr->rr_scols - rr->rr_cols;
/* Allocate buffers for the parity columns */
for (c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* Parity columns will pad out a linear ABD to account for
* the skip sector. A linear ABD is used here because
* parity calculations use the ABD buffer directly to calculate
* parity. This avoids doing a memcpy back to the ABD after the
* parity has been calculated. By issuing the parity column
* with the skip sector we can reduce contention on the child
* VDEV queue locks (vq_lock).
*/
if (c < nwrapped) {
rc->rc_abd = abd_alloc_linear(
rc->rc_size + (1ULL << ashift), B_FALSE);
abd_zero_off(rc->rc_abd, rc->rc_size, 1ULL << ashift);
skipped++;
} else {
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
}
for (off = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
abd_t *abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
/*
* Generate I/O for skip sectors to improve aggregation
* continuity. We will use gang ABD's to reduce contention
* on the child VDEV queue locks (vq_lock) by issuing
* a single I/O that contains the data and skip sector.
*
* It is important to make sure that rc_size is not updated
* even though we are adding a skip sector to the ABD. When
* calculating the parity in vdev_raidz_generate_parity_row()
* the rc_size is used to iterate through the ABD's. We can
* not have zero'd out skip sectors used for calculating
* parity for raidz, because those same sectors are not used
* during reconstruction.
*/
if (c >= rm->rm_skipstart && skipped < rm->rm_nskip) {
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd, B_TRUE);
abd_gang_add(rc->rc_abd,
abd_get_zeros(1ULL << ashift), B_TRUE);
skipped++;
} else {
rc->rc_abd = abd;
}
off += rc->rc_size;
}
ASSERT3U(off, ==, zio->io_size);
ASSERT3S(skipped, ==, rm->rm_nskip);
}
static void
vdev_raidz_map_alloc_read(zio_t *zio, raidz_map_t *rm)
{
int c;
raidz_row_t *rr = rm->rm_row[0];
ASSERT3U(rm->rm_nrows, ==, 1);
/* Allocate buffers for the parity columns */
for (c = 0; c < rr->rr_firstdatacol; c++)
rr->rr_col[c].rc_abd =
abd_alloc_linear(rr->rr_col[c].rc_size, B_FALSE);
for (uint64_t off = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
off += rc->rc_size;
}
}
/*
* Divides the IO evenly across all child vdevs; usually, dcols is
* the number of children in the target vdev.
*
* Avoid inlining the function to keep vdev_raidz_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
uint64_t nparity)
{
raidz_row_t *rr;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = zio->io_offset >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = zio->io_size >> ashift;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* The starting byte offset on each child vdev. */
uint64_t o = (b / dcols) << ashift;
uint64_t acols, scols;
raidz_map_t *rm =
kmem_zalloc(offsetof(raidz_map_t, rm_row[1]), KM_SLEEP);
rm->rm_nrows = 1;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
uint64_t q = s / (dcols - nparity);
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
uint64_t r = s - q * (dcols - nparity);
/* The number of "big columns" - those which contain remainder data. */
uint64_t bc = (r == 0 ? 0 : r + nparity);
/*
* The total number of data and parity sectors associated with
* this I/O.
*/
uint64_t tot = s + nparity * (q + (r == 0 ? 0 : 1));
/*
* acols: The columns that will be accessed.
* scols: The columns that will be accessed or skipped.
*/
if (q == 0) {
/* Our I/O request doesn't span all child vdevs. */
acols = bc;
scols = MIN(dcols, roundup(bc, nparity + 1));
} else {
acols = dcols;
scols = dcols;
}
ASSERT3U(acols, <=, scols);
rr = vdev_raidz_row_alloc(scols, zio);
rm->rm_row[0] = rr;
rr->rr_cols = acols;
rr->rr_bigcols = bc;
rr->rr_firstdatacol = nparity;
#ifdef ZFS_DEBUG
rr->rr_offset = zio->io_offset;
rr->rr_size = zio->io_size;
#endif
uint64_t asize = 0;
for (uint64_t c = 0; c < scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
uint64_t col = f + c;
uint64_t coff = o;
if (col >= dcols) {
col -= dcols;
coff += 1ULL << ashift;
}
rc->rc_devidx = col;
rc->rc_offset = coff;
if (c >= acols)
rc->rc_size = 0;
else if (c < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rm->rm_nskip = roundup(tot, nparity + 1) - tot;
rm->rm_skipstart = bc;
/*
* If all data stored spans all columns, there's a danger that parity
* will always be on the same device and, since parity isn't read
* during normal operation, that device's I/O bandwidth won't be
* used effectively. We therefore switch the parity every 1MB.
*
* ... at least that was, ostensibly, the theory. As a practical
* matter unless we juggle the parity between all devices evenly, we
* won't see any benefit. Further, occasional writes that aren't a
* multiple of the LCM of the number of children and the minimum
* stripe width are sufficient to avoid pessimal behavior.
* Unfortunately, this decision created an implicit on-disk format
* requirement that we need to support for all eternity, but only
* for single-parity RAID-Z.
*
* If we intend to skip a sector in the zeroth column for padding
* we must make sure to note this swap. We will never intend to
* skip the first column since at least one data and one parity
* column must appear in each row.
*/
ASSERT(rr->rr_cols >= 2);
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
if (rr->rr_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
uint64_t devidx = rr->rr_col[0].rc_devidx;
o = rr->rr_col[0].rc_offset;
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
rr->rr_col[1].rc_devidx = devidx;
rr->rr_col[1].rc_offset = o;
if (rm->rm_skipstart == 0)
rm->rm_skipstart = 1;
}
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_map_alloc_write(zio, rm, ashift);
} else {
vdev_raidz_map_alloc_read(zio, rm);
}
/* init RAIDZ parity ops */
rm->rm_ops = vdev_raidz_math_get_ops();
return (rm);
}
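To make the q/r/bc/tot arithmetic above concrete, here is one worked case, written as an editor's comment sketch with illustrative values (not taken from the source): a 5-sector write to a 4-wide RAIDZ1.
/*
 * Editor's worked example for vdev_raidz_map_alloc(), with s = 5 data
 * sectors, dcols = 4, nparity = 1:
 *
 *   q            = s / (dcols - nparity)     = 5 / 3 = 1
 *   r            = s - q * (dcols - nparity) = 5 - 3 = 2
 *   bc           = r + nparity               = 3
 *   tot          = s + nparity * (q + 1)     = 5 + 2 = 7
 *   acols, scols = dcols                     = 4      (since q != 0)
 *   rm_nskip     = roundup(7, 2) - 7         = 1
 *   rm_skipstart = bc                        = 3
 *
 * Columns 0 (parity), 1 and 2 get q+1 = 2 sectors each, column 3 gets
 * q = 1 sector, for asize = 7 sectors = tot.
 */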
/*
* Everything before reflow_offset_synced should have been moved to the new
* location (read and write completed). However, this may not yet be reflected
* in the on-disk format (e.g. raidz_reflow_sync() has been called but the
* uberblock has not yet been written). If reflow is not in progress,
* reflow_offset_synced should be UINT64_MAX. For each row, if the row is
* entirely before reflow_offset_synced, it will come from the new location.
* Otherwise this row will come from the old location. Therefore, rows that
* straddle the reflow_offset_synced will come from the old location.
*
* For writes, reflow_offset_next is the next offset to copy. If a sector has
* been copied, but not yet reflected in the on-disk progress
* (reflow_offset_synced), it will also be written to the new (already copied)
* offset.
*/
noinline raidz_map_t *
vdev_raidz_map_alloc_expanded(zio_t *zio,
uint64_t ashift, uint64_t physical_cols, uint64_t logical_cols,
uint64_t nparity, uint64_t reflow_offset_synced,
uint64_t reflow_offset_next, boolean_t use_scratch)
{
abd_t *abd = zio->io_abd;
uint64_t offset = zio->io_offset;
uint64_t size = zio->io_size;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = size >> ashift;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
* AKA "full rows"
*/
uint64_t q = s / (logical_cols - nparity);
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
uint64_t r = s - q * (logical_cols - nparity);
/* The number of "big columns" - those which contain remainder data. */
uint64_t bc = (r == 0 ? 0 : r + nparity);
/*
* The total number of data and parity sectors associated with
* this I/O.
*/
uint64_t tot = s + nparity * (q + (r == 0 ? 0 : 1));
/* How many rows contain data (not skip) */
uint64_t rows = howmany(tot, logical_cols);
int cols = MIN(tot, logical_cols);
raidz_map_t *rm =
kmem_zalloc(offsetof(raidz_map_t, rm_row[rows]),
KM_SLEEP);
rm->rm_nrows = rows;
rm->rm_nskip = roundup(tot, nparity + 1) - tot;
rm->rm_skipstart = bc;
uint64_t asize = 0;
for (uint64_t row = 0; row < rows; row++) {
boolean_t row_use_scratch = B_FALSE;
raidz_row_t *rr = vdev_raidz_row_alloc(cols, zio);
rm->rm_row[row] = rr;
/* The starting RAIDZ (parent) vdev sector of the row. */
uint64_t b = (offset >> ashift) + row * logical_cols;
/*
* If we are in the middle of a reflow, and the copying has
* not yet completed for any part of this row, then use the
* old location of this row. Note that reflow_offset_synced
* reflects the i/o that's been completed, because it's
* updated by a synctask, after zio_wait(spa_txg_zio[]).
* This is sufficient for our check, even if that progress
* has not yet been recorded to disk (reflected in
* spa_ubsync). Also note that we consider the last row to
* be "full width" (`cols`-wide rather than `bc`-wide) for
* this calculation. This causes a tiny bit of unnecessary
* double-writes but is safe and simpler to calculate.
*/
int row_phys_cols = physical_cols;
if (b + cols > reflow_offset_synced >> ashift)
row_phys_cols--;
else if (use_scratch)
row_use_scratch = B_TRUE;
/* starting child of this row */
uint64_t child_id = b % row_phys_cols;
/* The starting byte offset on each child vdev. */
uint64_t child_offset = (b / row_phys_cols) << ashift;
/*
* Note, rr_cols is the entire width of the block, even
* if this row is shorter. This is needed because parity
* generation (for Q and R) needs to know the entire width,
* because it treats the short row as though it was
* full-width (and the "phantom" sectors were zero-filled).
*
* Another approach to this would be to set cols shorter
* (to just the number of columns that we might do i/o to)
* and have another mechanism to tell the parity generation
* about the "entire width". Reconstruction (at least
* vdev_raidz_reconstruct_general()) would also need to
* know about the "entire width".
*/
rr->rr_firstdatacol = nparity;
#ifdef ZFS_DEBUG
/*
* note: rr_size is PSIZE, not ASIZE
*/
rr->rr_offset = b << ashift;
rr->rr_size = (rr->rr_cols - rr->rr_firstdatacol) << ashift;
#endif
for (int c = 0; c < rr->rr_cols; c++, child_id++) {
if (child_id >= row_phys_cols) {
child_id -= row_phys_cols;
child_offset += 1ULL << ashift;
}
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_devidx = child_id;
rc->rc_offset = child_offset;
/*
* Get this from the scratch space if appropriate.
* This only happens if we crashed in the middle of
* raidz_reflow_scratch_sync() (while it's running,
* the rangelock prevents us from doing concurrent
* io), and even then only during zpool import or
* when the pool is imported readonly.
*/
if (row_use_scratch)
rc->rc_offset -= VDEV_BOOT_SIZE;
uint64_t dc = c - rr->rr_firstdatacol;
if (c < rr->rr_firstdatacol) {
rc->rc_size = 1ULL << ashift;
/*
* Parity sectors' rc_abd's are set below
* after determining if this is an aggregation.
*/
} else if (row == rows - 1 && bc != 0 && c >= bc) {
/*
* Past the end of the block (even including
* skip sectors). This sector is part of the
* map so that we have full rows for p/q parity
* generation.
*/
rc->rc_size = 0;
rc->rc_abd = NULL;
} else {
/* "data column" (col excluding parity) */
uint64_t off;
if (c < bc || r == 0) {
off = dc * rows + row;
} else {
off = r * rows +
(dc - r) * (rows - 1) + row;
}
rc->rc_size = 1ULL << ashift;
rc->rc_abd = abd_get_offset_struct(
&rc->rc_abdstruct, abd, off << ashift,
rc->rc_size);
}
if (rc->rc_size == 0)
continue;
/*
* If any part of this row is in both old and new
* locations, the primary location is the old
* location. If this sector was already copied to the
* new location, we need to also write to the new,
* "shadow" location.
*
* Note, `row_phys_cols != physical_cols` indicates
* that the primary location is the old location.
* `b+c < reflow_offset_next` indicates that the copy
* to the new location has been initiated. We know
* that the copy has completed because we have the
* rangelock, which is held exclusively while the
* copy is in progress.
*/
if (row_use_scratch ||
(row_phys_cols != physical_cols &&
b + c < reflow_offset_next >> ashift)) {
rc->rc_shadow_devidx = (b + c) % physical_cols;
rc->rc_shadow_offset =
((b + c) / physical_cols) << ashift;
if (row_use_scratch)
rc->rc_shadow_offset -= VDEV_BOOT_SIZE;
}
asize += rc->rc_size;
}
/*
* See comment in vdev_raidz_map_alloc()
*/
if (rr->rr_firstdatacol == 1 && rr->rr_cols > 1 &&
(offset & (1ULL << 20))) {
ASSERT(rr->rr_cols >= 2);
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
int devidx0 = rr->rr_col[0].rc_devidx;
uint64_t offset0 = rr->rr_col[0].rc_offset;
int shadow_devidx0 = rr->rr_col[0].rc_shadow_devidx;
uint64_t shadow_offset0 =
rr->rr_col[0].rc_shadow_offset;
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
rr->rr_col[0].rc_shadow_devidx =
rr->rr_col[1].rc_shadow_devidx;
rr->rr_col[0].rc_shadow_offset =
rr->rr_col[1].rc_shadow_offset;
rr->rr_col[1].rc_devidx = devidx0;
rr->rr_col[1].rc_offset = offset0;
rr->rr_col[1].rc_shadow_devidx = shadow_devidx0;
rr->rr_col[1].rc_shadow_offset = shadow_offset0;
}
}
ASSERT3U(asize, ==, tot << ashift);
/*
* Determine if the block is contiguous, in which case we can use
* an aggregation.
*/
if (rows >= raidz_io_aggregate_rows) {
rm->rm_nphys_cols = physical_cols;
rm->rm_phys_col =
kmem_zalloc(sizeof (raidz_col_t) * rm->rm_nphys_cols,
KM_SLEEP);
/*
* Determine the aggregate io's offset and size, and check
* that the io is contiguous.
*/
for (int i = 0;
i < rm->rm_nrows && rm->rm_phys_col != NULL; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
raidz_col_t *prc =
&rm->rm_phys_col[rc->rc_devidx];
if (rc->rc_size == 0)
continue;
if (prc->rc_size == 0) {
ASSERT0(prc->rc_offset);
prc->rc_offset = rc->rc_offset;
} else if (prc->rc_offset + prc->rc_size !=
rc->rc_offset) {
/*
* This block is not contiguous and
* therefore can't be aggregated.
* This is expected to be rare, so
* the cost of allocating and then
* freeing rm_phys_col is not
* significant.
*/
kmem_free(rm->rm_phys_col,
sizeof (raidz_col_t) *
rm->rm_nphys_cols);
rm->rm_phys_col = NULL;
rm->rm_nphys_cols = 0;
break;
}
prc->rc_size += rc->rc_size;
}
}
}
if (rm->rm_phys_col != NULL) {
/*
* Allocate aggregate ABD's.
*/
for (int i = 0; i < rm->rm_nphys_cols; i++) {
raidz_col_t *prc = &rm->rm_phys_col[i];
prc->rc_devidx = i;
if (prc->rc_size == 0)
continue;
prc->rc_abd =
abd_alloc_linear(rm->rm_phys_col[i].rc_size,
B_FALSE);
}
/*
* Point the parity abd's into the aggregate abd's.
*/
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
raidz_col_t *prc =
&rm->rm_phys_col[rc->rc_devidx];
rc->rc_abd =
abd_get_offset_struct(&rc->rc_abdstruct,
prc->rc_abd,
rc->rc_offset - prc->rc_offset,
rc->rc_size);
}
}
} else {
/*
* Allocate new abd's for the parity sectors.
*/
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd =
abd_alloc_linear(rc->rc_size,
B_TRUE);
}
}
}
/* init RAIDZ parity ops */
rm->rm_ops = vdev_raidz_math_get_ops();
return (rm);
}
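As a worked illustration of the expanded-map layout computed above (editor's example with hypothetical geometry): logical_cols = 5, nparity = 1, and s = 7 data sectors.
/*
 * Editor's worked example for vdev_raidz_map_alloc_expanded(), with
 * logical_cols = 5, nparity = 1, s = 7:
 *
 *   q = 1, r = 3, bc = 4, tot = 9, rows = howmany(9, 5) = 2, cols = 5
 *
 * Data columns dc = 0..2 (c < bc) use off = dc * rows + row, giving ABD
 * sectors 0,1 / 2,3 / 4,5; data column dc = 3 uses
 * off = r * rows + (dc - r) * (rows - 1) + row = 6 and exists only in
 * row 0 (in the last row, c >= bc becomes a zero-size pad column). Each
 * row also carries one parity sector, so the map covers 2 + 7 = 9 = tot
 * sectors, with the block's data laid out column by column in the ABD.
 */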
struct pqr_struct {
uint64_t *p;
uint64_t *q;
uint64_t *r;
};
static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
int cnt = size / sizeof (src[0]);
ASSERT(pqr->p && !pqr->q && !pqr->r);
for (int i = 0; i < cnt; i++, src++, pqr->p++)
*pqr->p ^= *src;
return (0);
}
static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && !pqr->r);
for (int i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
}
return (0);
}
static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && pqr->r);
for (int i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
*pqr->r ^= *src;
}
return (0);
}
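The VDEV_RAIDZ_64MUL_2/4 macros used in the functions above apply a GF(2^8) multiply-by-2 (and by 4, i.e. two doublings) to eight packed bytes at once. Below is a minimal scalar sketch of the per-byte operation, assuming the standard RAID-6 generator polynomial x^8 + x^4 + x^3 + x^2 + 1 (a 0x1d reduction), which these macros appear to encode; gf_mul2() is an editor's illustration, not a function in this file.
static inline uint8_t
gf_mul2(uint8_t b)
{
        /* Double one GF(2^8) element; the macros do eight of these per word. */
        return ((uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1d : 0x00)));
}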
static void
vdev_raidz_generate_parity_p(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
} else {
struct pqr_struct pqr = { p, NULL, NULL };
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_p_func, &pqr);
}
}
}
static void
vdev_raidz_generate_parity_pq(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, NULL };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pq_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
}
}
}
}
static void
vdev_raidz_generate_parity_pqr(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t *r = abd_to_buf(rr->rr_col[VDEV_RAIDZ_R].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_R].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
(void) memcpy(r, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
r[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, r };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pqr_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
VDEV_RAIDZ_64MUL_4(r[i], mask);
}
}
}
}
/*
* Generate RAID parity in the first virtual columns according to the number of
* parity columns available.
*/
void
vdev_raidz_generate_parity_row(raidz_map_t *rm, raidz_row_t *rr)
{
if (rr->rr_cols == 0) {
/*
* We are handling this block one row at a time (because
* this block has a different logical vs physical width,
* due to RAIDZ expansion), and this is a pad-only row,
* which has no parity.
*/
return;
}
/* Generate using the new math implementation */
if (vdev_raidz_math_generate(rm, rr) != RAIDZ_ORIGINAL_IMPL)
return;
switch (rr->rr_firstdatacol) {
case 1:
vdev_raidz_generate_parity_p(rr);
break;
case 2:
vdev_raidz_generate_parity_pq(rr);
break;
case 3:
vdev_raidz_generate_parity_pqr(rr);
break;
default:
cmn_err(CE_PANIC, "invalid RAID-Z configuration");
}
}
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_generate_parity_row(rm, rr);
}
}
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
(void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]);
for (int i = 0; i < cnt; i++) {
dst[i] ^= src[i];
}
return (0);
}
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
void *private)
{
(void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, src++) {
VDEV_RAIDZ_64MUL_2(*dst, mask);
*dst ^= *src;
}
return (0);
}
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
(void) private;
uint64_t *dst = buf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++) {
/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
VDEV_RAIDZ_64MUL_2(*dst, mask);
}
return (0);
}
struct reconst_q_struct {
uint64_t *q;
int exp;
};
static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
struct reconst_q_struct *rq = private;
uint64_t *dst = buf;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, rq->q++) {
int j;
uint8_t *b;
*dst ^= *rq->q;
for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
*b = vdev_raidz_exp2(*b, rq->exp);
}
}
return (0);
}
struct reconst_pq_struct {
uint8_t *p;
uint8_t *q;
uint8_t *pxy;
uint8_t *qxy;
int aexp;
int bexp;
};
static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
uint8_t *yd = ybuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
*yd = *rpq->p ^ *rpq->pxy ^ *xd;
}
return (0);
}
static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
/* same operation as vdev_raidz_reconst_pq_func() on xd */
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
}
return (0);
}
static void
vdev_raidz_reconstruct_p(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
abd_t *dst, *src;
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT)
zfs_dbgmsg("reconstruct_p(rm=%px x=%u)", rr, x);
ASSERT3U(ntgts, ==, 1);
ASSERT3U(x, >=, rr->rr_firstdatacol);
ASSERT3U(x, <, rr->rr_cols);
ASSERT3U(rr->rr_col[x].rc_size, <=, rr->rr_col[VDEV_RAIDZ_P].rc_size);
src = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
dst = rr->rr_col[x].rc_abd;
abd_copy_from_buf(dst, abd_to_buf(src), rr->rr_col[x].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
if (c == x)
continue;
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_p_func, NULL);
}
}
static void
vdev_raidz_reconstruct_q(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
int c, exp;
abd_t *dst, *src;
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT)
zfs_dbgmsg("reconstruct_q(rm=%px x=%u)", rr, x);
ASSERT(ntgts == 1);
ASSERT(rr->rr_col[x].rc_size <= rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = (c == x) ? 0 : MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
dst = rr->rr_col[x].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy(dst, src, size);
if (rr->rr_col[x].rc_size > size) {
abd_zero_off(dst, size,
rr->rr_col[x].rc_size - size);
}
} else {
ASSERT3U(size, <=, rr->rr_col[x].rc_size);
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_q_pre_func, NULL);
(void) abd_iterate_func(dst,
size, rr->rr_col[x].rc_size - size,
vdev_raidz_reconst_q_pre_tail_func, NULL);
}
}
src = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
dst = rr->rr_col[x].rc_abd;
exp = 255 - (rr->rr_cols - 1 - x);
struct reconst_q_struct rq = { abd_to_buf(src), exp };
(void) abd_iterate_func(dst, 0, rr->rr_col[x].rc_size,
vdev_raidz_reconst_q_post_func, &rq);
}
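The exponent used above can be read off from the Q definition (editor's note): after the loop, dst holds Q recomputed with D_x treated as zero, and the multiplicative group of GF(2^8) has order 255, so the required inverse is itself a power of 2.
/*
 * Editor's derivation of exp = 255 - (cols - 1 - x):
 *
 *   Q ^ Qx = 2^(cols-1-x) * D_x          (Qx: Q recomputed with D_x = 0)
 *   (2^k)^-1 = 2^(255-k)                 (2^255 = 1 in GF(2^8))
 *   D_x = 2^(255-(cols-1-x)) * (Q ^ Qx)
 */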
static void
vdev_raidz_reconstruct_pq(raidz_row_t *rr, int *tgts, int ntgts)
{
uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
abd_t *pdata, *qdata;
uint64_t xsize, ysize;
int x = tgts[0];
int y = tgts[1];
abd_t *xd, *yd;
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT)
zfs_dbgmsg("reconstruct_pq(rm=%px x=%u y=%u)", rr, x, y);
ASSERT(ntgts == 2);
ASSERT(x < y);
ASSERT(x >= rr->rr_firstdatacol);
ASSERT(y < rr->rr_cols);
ASSERT(rr->rr_col[x].rc_size >= rr->rr_col[y].rc_size);
/*
* Move the parity data aside -- we're going to compute parity as
* though columns x and y were full of zeros -- Pxy and Qxy. We want to
* reuse the parity generation mechanism without trashing the actual
* parity so we make those columns appear to be full of zeros by
* setting their lengths to zero.
*/
pdata = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
qdata = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
xsize = rr->rr_col[x].rc_size;
ysize = rr->rr_col[y].rc_size;
rr->rr_col[VDEV_RAIDZ_P].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
rr->rr_col[VDEV_RAIDZ_Q].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
rr->rr_col[x].rc_size = 0;
rr->rr_col[y].rc_size = 0;
vdev_raidz_generate_parity_pq(rr);
rr->rr_col[x].rc_size = xsize;
rr->rr_col[y].rc_size = ysize;
p = abd_to_buf(pdata);
q = abd_to_buf(qdata);
pxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
qxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
xd = rr->rr_col[x].rc_abd;
yd = rr->rr_col[y].rc_abd;
/*
* We now have:
* Pxy = P + D_x + D_y
* Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
*
* We can then solve for D_x:
* D_x = A * (P + Pxy) + B * (Q + Qxy)
* where
* A = 2^(x - y) * (2^(x - y) + 1)^-1
* B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
*
* With D_x in hand, we can easily solve for D_y:
* D_y = P + Pxy + D_x
*/
a = vdev_raidz_pow2[255 + x - y];
b = vdev_raidz_pow2[255 - (rr->rr_cols - 1 - x)];
tmp = 255 - vdev_raidz_log2[a ^ 1];
aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];
ASSERT3U(xsize, >=, ysize);
struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };
(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
vdev_raidz_reconst_pq_func, &rpq);
(void) abd_iterate_func(xd, ysize, xsize - ysize,
vdev_raidz_reconst_pq_tail_func, &rpq);
abd_free(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
abd_free(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
/*
* Restore the saved parity data.
*/
rr->rr_col[VDEV_RAIDZ_P].rc_abd = pdata;
rr->rr_col[VDEV_RAIDZ_Q].rc_abd = qdata;
}
/*
* In the general case of reconstruction, we must solve the system of linear
* equations defined by the coefficients used to generate parity as well as
* the contents of the data and parity disks. This can be expressed with
* vectors for the original data (D) and the actual data (d) and parity (p)
* and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
*
* __ __ __ __
* | | __ __ | p_0 |
* | V | | D_0 | | p_m-1 |
* | | x | : | = | d_0 |
* | I | | D_n-1 | | : |
* | | ~~ ~~ | d_n-1 |
* ~~ ~~ ~~ ~~
*
* I is simply a square identity matrix of size n, and V is a Vandermonde
* matrix defined by the coefficients we chose for the various parity columns
* (1, 2, 4). Note that these values were chosen for simplicity, speedy
* computation, and linear separability.
*
* __ __ __ __
* | 1 .. 1 1 1 | | p_0 |
* | 2^n-1 .. 4 2 1 | __ __ | : |
* | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 |
* | 1 .. 0 0 0 | | D_1 | | d_0 |
* | 0 .. 0 0 0 | x | D_2 | = | d_1 |
* | : : : : | | : | | d_2 |
* | 0 .. 1 0 0 | | D_n-1 | | : |
* | 0 .. 0 1 0 | ~~ ~~ | : |
* | 0 .. 0 0 1 | | d_n-1 |
* ~~ ~~ ~~ ~~
*
* Note that I, V, d, and p are known. To compute D, we must invert the
* matrix and use the known data and parity values to reconstruct the unknown
* data values. We begin by removing the rows in V|I and d|p that correspond
* to failed or missing columns; we then make V|I square (n x n) and d|p
* sized n by removing rows corresponding to unused parity from the bottom up
* to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
* using Gauss-Jordan elimination. In the example below we use m=3 parity
* columns, n=8 data columns, with errors in d_1, d_2, and p_1:
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks
* | 19 205 116 29 64 16 4 1 | / /
* | 1 0 0 0 0 0 0 0 | / /
* | 0 1 0 0 0 0 0 0 | <--' /
* (V|I) = | 0 0 1 0 0 0 0 0 | <---'
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 |
* | 19 205 116 29 64 16 4 1 |
* | 1 0 0 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 |
* (V|I)' = | 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
* have carefully chosen the seed values 1, 2, and 4 to ensure that this
* matrix is not singular.
* __ __
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 0 0 1 0 0 0 0 0 |
* | 167 100 5 41 159 169 217 208 |
* | 166 100 4 40 158 168 216 209 |
* (V|I)'^-1 = | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
* of the missing data.
*
* As is apparent from the example above, the only non-trivial rows in the
* inverse matrix correspond to the data disks that we're trying to
* reconstruct. Indeed, those are the only rows we need as the others would
* only be useful for reconstructing data known or assumed to be valid. For
* that reason, we only build the coefficients in the rows that correspond to
* targeted columns.
*/
static void
vdev_raidz_matrix_init(raidz_row_t *rr, int n, int nmap, int *map,
uint8_t **rows)
{
int i, j;
int pow;
ASSERT(n == rr->rr_cols - rr->rr_firstdatacol);
/*
* Fill in the missing rows of interest.
*/
for (i = 0; i < nmap; i++) {
ASSERT3S(0, <=, map[i]);
ASSERT3S(map[i], <=, 2);
pow = map[i] * n;
if (pow > 255)
pow -= 255;
ASSERT(pow <= 255);
for (j = 0; j < n; j++) {
pow -= map[i];
if (pow < 0)
pow += 255;
rows[i][j] = vdev_raidz_pow2[pow];
}
}
}
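Spelled out (editor's note), the loop above fills row i with descending powers of that parity row's generator, matching the Vandermonde rows shown in the block comment above.
/*
 *   g          = 2^map[i]                 (1 for P, 2 for Q, 4 for R)
 *   rows[i][j] = 2^(map[i] * (n - 1 - j)) = g^(n-1-j)
 */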
static void
vdev_raidz_matrix_invert(raidz_row_t *rr, int n, int nmissing, int *missing,
uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
int i, j, ii, jj;
uint8_t log;
/*
* Assert that the first nmissing entries from the array of used
* columns correspond to parity columns and that subsequent entries
* correspond to data columns.
*/
for (i = 0; i < nmissing; i++) {
ASSERT3S(used[i], <, rr->rr_firstdatacol);
}
for (; i < n; i++) {
ASSERT3S(used[i], >=, rr->rr_firstdatacol);
}
/*
* First initialize the storage where we'll compute the inverse rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
invrows[i][j] = (i == j) ? 1 : 0;
}
}
/*
* Subtract all trivial rows from the rows of consequence.
*/
for (i = 0; i < nmissing; i++) {
for (j = nmissing; j < n; j++) {
ASSERT3U(used[j], >=, rr->rr_firstdatacol);
jj = used[j] - rr->rr_firstdatacol;
ASSERT3S(jj, <, n);
invrows[i][j] = rows[i][jj];
rows[i][jj] = 0;
}
}
/*
* For each of the rows of interest, we must normalize it and subtract
* a multiple of it from the other rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < missing[i]; j++) {
ASSERT0(rows[i][j]);
}
ASSERT3U(rows[i][missing[i]], !=, 0);
/*
* Compute the inverse of the first element and multiply each
* element in the row by that value.
*/
log = 255 - vdev_raidz_log2[rows[i][missing[i]]];
for (j = 0; j < n; j++) {
rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
}
for (ii = 0; ii < nmissing; ii++) {
if (i == ii)
continue;
ASSERT3U(rows[ii][missing[i]], !=, 0);
log = vdev_raidz_log2[rows[ii][missing[i]]];
for (j = 0; j < n; j++) {
rows[ii][j] ^=
vdev_raidz_exp2(rows[i][j], log);
invrows[ii][j] ^=
vdev_raidz_exp2(invrows[i][j], log);
}
}
}
/*
* Verify that the data that is left in the rows is properly part of
* an identity matrix.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
if (j == missing[i]) {
ASSERT3U(rows[i][j], ==, 1);
} else {
ASSERT0(rows[i][j]);
}
}
}
}
static void
vdev_raidz_matrix_reconstruct(raidz_row_t *rr, int n, int nmissing,
int *missing, uint8_t **invrows, const uint8_t *used)
{
int i, j, x, cc, c;
uint8_t *src;
uint64_t ccount;
uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
uint8_t log = 0;
uint8_t val;
int ll;
uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
psize = sizeof (invlog[0][0]) * n * nmissing;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing; i++) {
invlog[i] = pp;
pp += n;
}
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
ASSERT3U(invrows[i][j], !=, 0);
invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
}
}
for (i = 0; i < n; i++) {
c = used[i];
ASSERT3U(c, <, rr->rr_cols);
ccount = rr->rr_col[c].rc_size;
ASSERT(ccount >= rr->rr_col[missing[0]].rc_size || i > 0);
if (ccount == 0)
continue;
src = abd_to_buf(rr->rr_col[c].rc_abd);
for (j = 0; j < nmissing; j++) {
cc = missing[j] + rr->rr_firstdatacol;
ASSERT3U(cc, >=, rr->rr_firstdatacol);
ASSERT3U(cc, <, rr->rr_cols);
ASSERT3U(cc, !=, c);
dcount[j] = rr->rr_col[cc].rc_size;
if (dcount[j] != 0)
dst[j] = abd_to_buf(rr->rr_col[cc].rc_abd);
}
for (x = 0; x < ccount; x++, src++) {
if (*src != 0)
log = vdev_raidz_log2[*src];
for (cc = 0; cc < nmissing; cc++) {
if (x >= dcount[cc])
continue;
if (*src == 0) {
val = 0;
} else {
if ((ll = log + invlog[cc][i]) >= 255)
ll -= 255;
val = vdev_raidz_pow2[ll];
}
if (i == 0)
dst[cc][x] = val;
else
dst[cc][x] ^= val;
}
}
}
kmem_free(p, psize);
}
static void
vdev_raidz_reconstruct_general(raidz_row_t *rr, int *tgts, int ntgts)
{
int i, c, t, tt;
unsigned int n;
unsigned int nmissing_rows;
int missing_rows[VDEV_RAIDZ_MAXPARITY];
int parity_map[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
uint8_t *used;
abd_t **bufs = NULL;
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT)
zfs_dbgmsg("reconstruct_general(rm=%px ntgts=%u)", rr, ntgts);
/*
* Matrix reconstruction can't use scatter ABDs yet, so we allocate
* temporary linear ABDs if any non-linear ABDs are found.
*/
for (i = rr->rr_firstdatacol; i < rr->rr_cols; i++) {
ASSERT(rr->rr_col[i].rc_abd != NULL);
if (!abd_is_linear(rr->rr_col[i].rc_abd)) {
bufs = kmem_alloc(rr->rr_cols * sizeof (abd_t *),
KM_PUSHPAGE);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
bufs[c] = col->rc_abd;
if (bufs[c] != NULL) {
col->rc_abd = abd_alloc_linear(
col->rc_size, B_TRUE);
abd_copy(col->rc_abd, bufs[c],
col->rc_size);
}
}
break;
}
}
n = rr->rr_cols - rr->rr_firstdatacol;
/*
* Figure out which data columns are missing.
*/
nmissing_rows = 0;
for (t = 0; t < ntgts; t++) {
if (tgts[t] >= rr->rr_firstdatacol) {
missing_rows[nmissing_rows++] =
tgts[t] - rr->rr_firstdatacol;
}
}
/*
* Figure out which parity columns to use to help generate the missing
* data columns.
*/
for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
ASSERT(tt < ntgts);
ASSERT(c < rr->rr_firstdatacol);
/*
* Skip any targeted parity columns.
*/
if (c == tgts[tt]) {
tt++;
continue;
}
parity_map[i] = c;
i++;
}
psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
nmissing_rows * n + sizeof (used[0]) * n;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing_rows; i++) {
rows[i] = pp;
pp += n;
invrows[i] = pp;
pp += n;
}
used = pp;
for (i = 0; i < nmissing_rows; i++) {
used[i] = parity_map[i];
}
for (tt = 0, c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
if (tt < nmissing_rows &&
c == missing_rows[tt] + rr->rr_firstdatacol) {
tt++;
continue;
}
ASSERT3S(i, <, n);
used[i] = c;
i++;
}
/*
* Initialize the interesting rows of the matrix.
*/
vdev_raidz_matrix_init(rr, n, nmissing_rows, parity_map, rows);
/*
* Invert the matrix.
*/
vdev_raidz_matrix_invert(rr, n, nmissing_rows, missing_rows, rows,
invrows, used);
/*
* Reconstruct the missing data using the generated matrix.
*/
vdev_raidz_matrix_reconstruct(rr, n, nmissing_rows, missing_rows,
invrows, used);
kmem_free(p, psize);
/*
* copy back from temporary linear abds and free them
*/
if (bufs) {
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
if (bufs[c] != NULL) {
abd_copy(bufs[c], col->rc_abd, col->rc_size);
abd_free(col->rc_abd);
}
col->rc_abd = bufs[c];
}
kmem_free(bufs, rr->rr_cols * sizeof (abd_t *));
}
}
static void
vdev_raidz_reconstruct_row(raidz_map_t *rm, raidz_row_t *rr,
const int *t, int nt)
{
int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
int ntgts;
int i, c, ret;
int nbadparity, nbaddata;
int parity_valid[VDEV_RAIDZ_MAXPARITY];
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT) {
zfs_dbgmsg("reconstruct(rm=%px nt=%u cols=%u md=%u mp=%u)",
rr, nt, (int)rr->rr_cols, (int)rr->rr_missingdata,
(int)rr->rr_missingparity);
}
nbadparity = rr->rr_firstdatacol;
nbaddata = rr->rr_cols - nbadparity;
ntgts = 0;
for (i = 0, c = 0; c < rr->rr_cols; c++) {
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT) {
zfs_dbgmsg("reconstruct(rm=%px col=%u devid=%u "
"offset=%llx error=%u)",
rr, c, (int)rr->rr_col[c].rc_devidx,
(long long)rr->rr_col[c].rc_offset,
(int)rr->rr_col[c].rc_error);
}
if (c < rr->rr_firstdatacol)
parity_valid[c] = B_FALSE;
if (i < nt && c == t[i]) {
tgts[ntgts++] = c;
i++;
} else if (rr->rr_col[c].rc_error != 0) {
tgts[ntgts++] = c;
} else if (c >= rr->rr_firstdatacol) {
nbaddata--;
} else {
parity_valid[c] = B_TRUE;
nbadparity--;
}
}
ASSERT(ntgts >= nt);
ASSERT(nbaddata >= 0);
ASSERT(nbaddata + nbadparity == ntgts);
dt = &tgts[nbadparity];
/* Reconstruct using the new math implementation */
ret = vdev_raidz_math_reconstruct(rm, rr, parity_valid, dt, nbaddata);
if (ret != RAIDZ_ORIGINAL_IMPL)
return;
/*
* See if we can use any of our optimized reconstruction routines.
*/
switch (nbaddata) {
case 1:
if (parity_valid[VDEV_RAIDZ_P]) {
vdev_raidz_reconstruct_p(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_q(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
case 2:
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_P] &&
parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_pq(rr, dt, 2);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
}
vdev_raidz_reconstruct_general(rr, tgts, ntgts);
}
static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t nparity = vdrz->vd_nparity;
int c;
int lasterror = 0;
int numerrors = 0;
ASSERT(nparity > 0);
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
}
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0)
continue;
*physical_ashift = vdev_best_ashift(*logical_ashift,
*physical_ashift, cvd->vdev_physical_ashift);
}
if (vd->vdev_rz_expanding) {
*asize *= vd->vdev_children - 1;
*max_asize *= vd->vdev_children - 1;
vd->vdev_min_asize = *asize;
} else {
*asize *= vd->vdev_children;
*max_asize *= vd->vdev_children;
}
if (numerrors > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_raidz_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
/*
* Return the logical width to use, given the txg in which the allocation
* happened. Note that BP_GET_BIRTH() is usually the txg in which the
* BP was allocated. Remapped BP's (that were relocated due to device
* removal, see remap_blkptr_cb()), will have a more recent physical birth
* which reflects when the BP was relocated, but we can ignore these because
* they can't be on RAIDZ (device removal doesn't support RAIDZ).
*/
static uint64_t
vdev_raidz_get_logical_width(vdev_raidz_t *vdrz, uint64_t txg)
{
reflow_node_t lookup = {
.re_txg = txg,
};
avl_index_t where;
uint64_t width;
mutex_enter(&vdrz->vd_expand_lock);
reflow_node_t *re = avl_find(&vdrz->vd_expand_txgs, &lookup, &where);
if (re != NULL) {
width = re->re_logical_width;
} else {
re = avl_nearest(&vdrz->vd_expand_txgs, where, AVL_BEFORE);
if (re != NULL)
width = re->re_logical_width;
else
width = vdrz->vd_original_width;
}
mutex_exit(&vdrz->vd_expand_lock);
return (width);
}
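For example (editor's illustration with hypothetical txgs and widths): if vd_expand_txgs holds entries for txg 1000 -> width 5 and txg 2000 -> width 6, with vd_original_width = 4, the nearest-at-or-before lookup above yields:
/*
 *   birth txg  500 -> width 4 (no entry at or before, original width)
 *   birth txg 1500 -> width 5 (nearest entry at or before is txg 1000)
 *   birth txg 2500 -> width 6 (nearest entry at or before is txg 2000)
 */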
/*
* Note: If the RAIDZ vdev has been expanded, older BP's may have allocated
* more space due to the lower data-to-parity ratio. In this case it's
* important to pass in the correct txg. Note that vdev_gang_header_asize()
* relies on a constant asize for psize=SPA_GANGBLOCKSIZE=SPA_MINBLOCKSIZE,
* regardless of txg. This is assured because for a single data sector, we
* allocate P+1 sectors regardless of width ("cols", which is at least P+1).
*/
static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t asize;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t cols = vdrz->vd_original_width;
uint64_t nparity = vdrz->vd_nparity;
cols = vdev_raidz_get_logical_width(vdrz, txg);
asize = ((psize - 1) >> ashift) + 1;
asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
asize = roundup(asize, nparity + 1) << ashift;
#ifdef ZFS_DEBUG
uint64_t asize_new = ((psize - 1) >> ashift) + 1;
uint64_t ncols_new = vdrz->vd_physical_width;
asize_new += nparity * ((asize_new + ncols_new - nparity - 1) /
(ncols_new - nparity));
asize_new = roundup(asize_new, nparity + 1) << ashift;
VERIFY3U(asize_new, <=, asize);
#endif
return (asize);
}
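A worked instance of the asize arithmetic above (editor's example with illustrative values, assuming no expansion so the logical width equals the 6-wide physical width): psize = 64 KB on a 6-wide RAIDZ2 with ashift = 12.
/*
 *   data   = ((65536 - 1) >> 12) + 1           = 16 sectors
 *   parity = 2 * ((16 + 6 - 2 - 1) / (6 - 2))  = 2 * 4 = 8 sectors
 *   asize  = roundup(16 + 8, 3) << 12          = 24 sectors = 96 KB
 */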
/*
* The allocatable space for a raidz vdev is N * sizeof(smallest child)
* so each child must provide at least 1/Nth of its asize.
*/
static uint64_t
vdev_raidz_min_asize(vdev_t *vd)
{
return ((vd->vdev_min_asize + vd->vdev_children - 1) /
vd->vdev_children);
}
void
vdev_raidz_child_done(zio_t *zio)
{
raidz_col_t *rc = zio->io_private;
ASSERT3P(rc->rc_abd, !=, NULL);
rc->rc_error = zio->io_error;
rc->rc_tried = 1;
rc->rc_skipped = 0;
}
static void
vdev_raidz_shadow_child_done(zio_t *zio)
{
raidz_col_t *rc = zio->io_private;
rc->rc_shadow_error = zio->io_error;
}
static void
vdev_raidz_io_verify(zio_t *zio, raidz_map_t *rm, raidz_row_t *rr, int col)
{
(void) rm;
#ifdef ZFS_DEBUG
- range_seg64_t logical_rs, physical_rs, remain_rs;
+ zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_raidz_asize(zio->io_vd, rr->rr_size,
BP_GET_BIRTH(zio->io_bp));
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
if (vdev_xlate_is_empty(&physical_rs)) {
/*
* If we are in the middle of expansion, the
* physical->logical mapping is changing so vdev_xlate()
* can't give us a reliable answer.
*/
return;
}
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
/*
* It would be nice to assert that rs_end is equal
* to rc_offset + rc_size but there might be an
* optional I/O at the end that is not accounted in
* rc_size.
*/
if (physical_rs.rs_end > rc->rc_offset + rc->rc_size) {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset +
rc->rc_size + (1 << zio->io_vd->vdev_top->vdev_ashift));
} else {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset + rc->rc_size);
}
#endif
}
static void
vdev_raidz_io_start_write(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
/* Verify physical to logical translation */
vdev_raidz_io_verify(zio, rm, rr, c);
if (rc->rc_size == 0)
continue;
ASSERT3U(rc->rc_offset + rc->rc_size, <,
cvd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3P(rc->rc_abd, !=, NULL);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd,
abd_get_size(rc->rc_abd), zio->io_type,
zio->io_priority, 0, vdev_raidz_child_done, rc));
if (rc->rc_shadow_devidx != INT_MAX) {
vdev_t *cvd2 = vd->vdev_child[rc->rc_shadow_devidx];
ASSERT3U(
rc->rc_shadow_offset + abd_get_size(rc->rc_abd), <,
cvd2->vdev_psize - VDEV_LABEL_END_SIZE);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd2,
rc->rc_shadow_offset, rc->rc_abd,
abd_get_size(rc->rc_abd),
zio->io_type, zio->io_priority, 0,
vdev_raidz_shadow_child_done, rc));
}
}
}
/*
* Generate optional I/Os for skip sectors to improve aggregation contiguity.
* This only works for vdev_raidz_map_alloc() (not _expanded()).
*/
static void
raidz_start_skip_writes(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t ashift = vd->vdev_top->vdev_ashift;
raidz_map_t *rm = zio->io_vsd;
ASSERT3U(rm->rm_nrows, ==, 1);
raidz_row_t *rr = rm->rm_row[0];
for (int c = 0; c < rr->rr_scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_size != 0)
continue;
ASSERT3P(rc->rc_abd, ==, NULL);
ASSERT3U(rc->rc_offset, <,
cvd->vdev_psize - VDEV_LABEL_END_SIZE);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd, rc->rc_offset,
NULL, 1ULL << ashift, zio->io_type, zio->io_priority,
ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
}
}
static void
vdev_raidz_io_start_read_row(zio_t *zio, raidz_row_t *rr, boolean_t forceparity)
{
vdev_t *vd = zio->io_vd;
/*
* Iterate over the columns in reverse order so that we hit the parity
* last -- any errors along the way will force us to read the parity.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0)
continue;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_readable(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
}
if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
if (forceparity ||
c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
static void
vdev_raidz_io_start_read_phys_cols(zio_t *zio, raidz_map_t *rm)
{
vdev_t *vd = zio->io_vd;
for (int i = 0; i < rm->rm_nphys_cols; i++) {
raidz_col_t *prc = &rm->rm_phys_col[i];
if (prc->rc_size == 0)
continue;
ASSERT3U(prc->rc_devidx, ==, i);
vdev_t *cvd = vd->vdev_child[i];
if (!vdev_readable(cvd)) {
prc->rc_error = SET_ERROR(ENXIO);
prc->rc_tried = 1; /* don't even try */
prc->rc_skipped = 1;
continue;
}
if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
prc->rc_error = SET_ERROR(ESTALE);
prc->rc_skipped = 1;
continue;
}
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
prc->rc_offset, prc->rc_abd, prc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, prc));
}
}
static void
vdev_raidz_io_start_read(zio_t *zio, raidz_map_t *rm)
{
/*
* If there are multiple rows, we will be hitting
* all disks, so go ahead and read the parity so
* that we are reading in decent size chunks.
*/
boolean_t forceparity = rm->rm_nrows > 1;
if (rm->rm_phys_col) {
vdev_raidz_io_start_read_phys_cols(zio, rm);
} else {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_start_read_row(zio, rr, forceparity);
}
}
}
/*
* Start an IO operation on a RAIDZ VDev
*
* Outline:
* - For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity.
* 3. If the column skips any sectors for padding, create optional dummy
* write zio children for those areas to improve aggregation continuity.
* - For read operations:
* 1. Create child zio read operations to each data column's vdev to read
* the range of data required for zio.
* 2. If this is a scrub or resilver operation, or if any of the data
* vdevs have had errors, then create zio read operations to the parity
* columns' VDevs as well.
*/
static void
vdev_raidz_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_t *tvd = vd->vdev_top;
vdev_raidz_t *vdrz = vd->vdev_tsd;
raidz_map_t *rm;
uint64_t logical_width = vdev_raidz_get_logical_width(vdrz,
BP_GET_BIRTH(zio->io_bp));
if (logical_width != vdrz->vd_physical_width) {
zfs_locked_range_t *lr = NULL;
uint64_t synced_offset = UINT64_MAX;
uint64_t next_offset = UINT64_MAX;
boolean_t use_scratch = B_FALSE;
/*
* Note: when the expansion is completing, we set
* vre_state=DSS_FINISHED (in raidz_reflow_complete_sync())
* in a later txg than when we last update spa_ubsync's state
* (see the end of spa_raidz_expand_thread()). Therefore we
* may see vre_state!=SCANNING before
* VDEV_TOP_ZAP_RAIDZ_EXPAND_STATE=DSS_FINISHED is reflected
* on disk, but the copying progress has been synced to disk
* (and reflected in spa_ubsync). In this case it's fine to
* treat the expansion as completed, since if we crash there's
* no additional copying to do.
*/
if (vdrz->vn_vre.vre_state == DSS_SCANNING) {
ASSERT3P(vd->vdev_spa->spa_raidz_expand, ==,
&vdrz->vn_vre);
lr = zfs_rangelock_enter(&vdrz->vn_vre.vre_rangelock,
zio->io_offset, zio->io_size, RL_READER);
use_scratch =
(RRSS_GET_STATE(&vd->vdev_spa->spa_ubsync) ==
RRSS_SCRATCH_VALID);
synced_offset =
RRSS_GET_OFFSET(&vd->vdev_spa->spa_ubsync);
next_offset = vdrz->vn_vre.vre_offset;
/*
* If we haven't resumed expanding since importing the
* pool, vre_offset won't have been set yet. In
* this case the next offset to be copied is the same
* as what was synced.
*/
if (next_offset == UINT64_MAX) {
next_offset = synced_offset;
}
}
if (use_scratch) {
zfs_dbgmsg("zio=%px %s io_offset=%llu offset_synced="
"%lld next_offset=%lld use_scratch=%u",
zio,
zio->io_type == ZIO_TYPE_WRITE ? "WRITE" : "READ",
(long long)zio->io_offset,
(long long)synced_offset,
(long long)next_offset,
use_scratch);
}
rm = vdev_raidz_map_alloc_expanded(zio,
tvd->vdev_ashift, vdrz->vd_physical_width,
logical_width, vdrz->vd_nparity,
synced_offset, next_offset, use_scratch);
rm->rm_lr = lr;
} else {
rm = vdev_raidz_map_alloc(zio,
tvd->vdev_ashift, logical_width, vdrz->vd_nparity);
}
rm->rm_original_width = vdrz->vd_original_width;
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_raidz_io_start_write(zio, rm->rm_row[i]);
}
if (logical_width == vdrz->vd_physical_width) {
raidz_start_skip_writes(zio);
}
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
vdev_raidz_io_start_read(zio, rm);
}
zio_execute(zio);
}
/*
* Report a checksum error for a child of a RAID-Z device.
*/
void
vdev_raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE) &&
zio->io_priority != ZIO_PRIORITY_REBUILD) {
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
&zio->io_bookmark, zio, rc->rc_offset, rc->rc_size,
rc->rc_abd, bad_data, &zbc);
}
}
/*
* We keep track of whether or not there were any injected errors, so that
* any ereports we generate can note it.
*/
static int
raidz_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t zbc = {0};
raidz_map_t *rm = zio->io_vsd;
int ret = zio_checksum_error(zio, &zbc);
/*
* Any Direct I/O read that has a checksum error must be treated as
* suspicious as the contents of the buffer could be getting
* manipulated while the I/O is taking place. The checksum verify error
* will be reported to the top-level RAIDZ VDEV.
*/
if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
zio->io_error = ret;
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
zio_dio_chksum_verify_error_report(zio);
zio_checksum_verified(zio);
return (0);
}
if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1;
return (ret);
}
/*
* Generate the parity from the data columns. If we tried and were able to
* read the parity without error, verify that the generated parity matches the
* data we read. If it doesn't, we fire off a checksum error. Return the
* number of such failures.
*/
static int
raidz_parity_verify(zio_t *zio, raidz_row_t *rr)
{
abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int c, ret = 0;
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc;
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
if (checksum == ZIO_CHECKSUM_NOPARITY)
return (ret);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
orig[c] = rc->rc_abd;
ASSERT3U(abd_get_size(rc->rc_abd), ==, rc->rc_size);
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
/*
* Verify any empty sectors are zero filled to ensure the parity
* is calculated correctly even if these non-data sectors are damaged.
*/
if (rr->rr_nempty && rr->rr_abd_empty != NULL)
ret += vdev_draid_map_verify_empty(zio, rr);
/*
* Regenerates parity even for !tried||rc_error!=0 columns. This
* isn't harmful but it does have the side effect of fixing stuff
* we didn't realize was necessary (i.e. even if we return 0).
*/
vdev_raidz_generate_parity_row(rm, rr);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
if (abd_cmp(orig[c], rc->rc_abd) != 0) {
zfs_dbgmsg("found error on col=%u devidx=%u off %llx",
c, (int)rc->rc_devidx, (u_longlong_t)rc->rc_offset);
vdev_raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
abd_free(orig[c]);
}
return (ret);
}
static int
vdev_raidz_worst_error(raidz_row_t *rr)
{
int error = 0;
for (int c = 0; c < rr->rr_cols; c++) {
error = zio_worst_error(error, rr->rr_col[c].rc_error);
error = zio_worst_error(error, rr->rr_col[c].rc_shadow_error);
}
return (error);
}
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
if (!rc->rc_skipped)
unexpected_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
if (rc->rc_force_repair)
unexpected_errors++;
}
/*
* If we read more parity disks than were used for
* reconstruction, confirm that the other parity disks produced
* correct data.
*
* Note that we also regenerate parity when resilvering so we
* can write it out to failed devices later.
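*
* For example (illustrative, not from the original source): for a RAIDZ2
* row (rr_firstdatacol = 2) where one data column was reconstructed and
* both parity columns were read without error, the check below is
* 0 + 0 < 2 - 1, so the parity column that reconstruction did not need
* is verified against freshly generated parity.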
*/
if (parity_errors + parity_untried <
rr->rr_firstdatacol - data_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER)) {
int n = raidz_parity_verify(zio, rr);
unexpected_errors += n;
}
if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
(unexpected_errors > 0 || (zio->io_flags & ZIO_FLAG_RESILVER))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *vd = zio->io_vd;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!rc->rc_allow_repair) {
continue;
} else if (!rc->rc_force_repair &&
(rc->rc_error == 0 || rc->rc_size == 0)) {
continue;
}
/*
* We do not allow self healing for Direct I/O reads.
* See comment in vdev_raid_row_alloc().
*/
ASSERT0(zio->io_flags & ZIO_FLAG_DIO_READ);
zfs_dbgmsg("zio=%px repairing c=%u devidx=%u "
"offset=%llx",
zio, c, rc->rc_devidx, (long long)rc->rc_offset);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
ZIO_TYPE_WRITE,
zio->io_priority == ZIO_PRIORITY_REBUILD ?
ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
/*
* Scrub or resilver i/o's: overwrite any shadow locations with the
* good data. This ensures that if we've already copied this sector,
* it will be corrected if it was damaged. This writes more than is
* necessary, but since expansion is paused during scrub/resilver, at
* most a single row will have a shadow location.
*/
if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
(zio->io_flags & (ZIO_FLAG_RESILVER | ZIO_FLAG_SCRUB))) {
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *vd = zio->io_vd;
if (rc->rc_shadow_devidx == INT_MAX || rc->rc_size == 0)
continue;
vdev_t *cvd = vd->vdev_child[rc->rc_shadow_devidx];
/*
* Note: We don't want to update the repair stats
* because that would incorrectly indicate that there
* was bad data to repair, which we aren't sure about.
* By clearing the SCAN_THREAD flag, we prevent this
* from happening, despite having the REPAIR flag set.
* We need to set SELF_HEAL so that this i/o can't be
* bypassed by zio_vdev_io_start().
*/
zio_t *cio = zio_vdev_child_io(zio, NULL, cvd,
rc->rc_shadow_offset, rc->rc_abd, rc->rc_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
NULL, NULL);
cio->io_flags &= ~ZIO_FLAG_SCAN_THREAD;
zio_nowait(cio);
}
}
}
static void
raidz_restore_orig_data(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
abd_copy(rc->rc_abd,
rc->rc_orig_data, rc->rc_size);
rc->rc_need_orig_restore = B_FALSE;
}
}
}
}
/*
* During raidz_reconstruct() for an expanded VDEV, we need to give special
* consideration to failure simulations. See the note in raidz_reconstruct() on
* simulating failure of a pre-expansion device.
*
* Treating logical child i as failed, return TRUE if the given column should
* be treated as failed. The idea of logical children allows us to imagine
* that a disk silently failed before a RAIDZ expansion (reads from this disk
* succeed but return the wrong data). Since the expansion doesn't verify
* checksums, the incorrect data will be moved to new locations spread among
* the children (going diagonally across them).
*
* Higher "logical child failures" (values of `i`) indicate these
* "pre-expansion failures". The first physical_width values imagine that a
* current child failed; the next physical_width-1 values imagine that a
* child failed before the most recent expansion; the next physical_width-2
* values imagine a child failed in the expansion before that, etc.
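*
* For example (illustrative, not from the original source): with
* physical_width=6 and original_width=4 there are 6+5+4 = 15 logical
* children; i=0..5 fail a current child (sector_id % 6 == i), i=6..10
* fail a child of the earlier 5-wide layout (sector_id % 5 == i-6), and
* i=11..14 fail a child of the original 4-wide layout
* (sector_id % 4 == i-11).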
*/
static boolean_t
raidz_simulate_failure(int physical_width, int original_width, int ashift,
int i, raidz_col_t *rc)
{
uint64_t sector_id =
physical_width * (rc->rc_offset >> ashift) +
rc->rc_devidx;
for (int w = physical_width; w >= original_width; w--) {
if (i < w) {
return (sector_id % w == i);
} else {
i -= w;
}
}
ASSERT(!"invalid logical child id");
return (B_FALSE);
}
/*
* returns EINVAL if reconstruction of the block will not be possible
* returns ECKSUM if this specific reconstruction failed
* returns 0 on successful reconstruction
*/
static int
raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
{
raidz_map_t *rm = zio->io_vsd;
int physical_width = zio->io_vd->vdev_children;
int original_width = (rm->rm_original_width != 0) ?
rm->rm_original_width : physical_width;
int dbgmsg = zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT;
if (dbgmsg) {
zfs_dbgmsg("raidz_reconstruct_expanded(zio=%px ltgts=%u,%u,%u "
"ntgts=%u", zio, ltgts[0], ltgts[1], ltgts[2], ntgts);
}
/* Reconstruct each row */
for (int r = 0; r < rm->rm_nrows; r++) {
raidz_row_t *rr = rm->rm_row[r];
int my_tgts[VDEV_RAIDZ_MAXPARITY]; /* value is child id */
int t = 0;
int dead = 0;
int dead_data = 0;
if (dbgmsg)
zfs_dbgmsg("raidz_reconstruct_expanded(row=%u)", r);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
ASSERT0(rc->rc_need_orig_restore);
if (rc->rc_error != 0) {
dead++;
if (c >= nparity)
dead_data++;
continue;
}
if (rc->rc_size == 0)
continue;
for (int lt = 0; lt < ntgts; lt++) {
if (raidz_simulate_failure(physical_width,
original_width,
zio->io_vd->vdev_top->vdev_ashift,
ltgts[lt], rc)) {
if (rc->rc_orig_data == NULL) {
rc->rc_orig_data =
abd_alloc_linear(
rc->rc_size, B_TRUE);
abd_copy(rc->rc_orig_data,
rc->rc_abd, rc->rc_size);
}
rc->rc_need_orig_restore = B_TRUE;
dead++;
if (c >= nparity)
dead_data++;
/*
* Note: simulating failure of a
* pre-expansion device can hit more
* than one column, in which case we
* might try to simulate more failures
* than can be reconstructed, which is
* also more than the size of my_tgts.
* This check prevents accessing past
* the end of my_tgts. The "dead >
* nparity" check below will fail this
* reconstruction attempt.
*/
if (t < VDEV_RAIDZ_MAXPARITY) {
my_tgts[t++] = c;
if (dbgmsg) {
zfs_dbgmsg("simulating "
"failure of col %u "
"devidx %u", c,
(int)rc->rc_devidx);
}
}
break;
}
}
}
if (dead > nparity) {
/* reconstruction not possible */
if (dbgmsg) {
zfs_dbgmsg("reconstruction not possible; "
"too many failures");
}
raidz_restore_orig_data(rm);
return (EINVAL);
}
if (dead_data > 0)
vdev_raidz_reconstruct_row(rm, rr, my_tgts, t);
}
/* Check for success */
if (raidz_checksum_verify(zio) == 0) {
if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
return (0);
/* Reconstruction succeeded - report errors */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
/*
* Note: if this is a parity column,
* we don't really know if it's wrong.
* We need to let
* vdev_raidz_io_done_verified() check
* it, and if we set rc_error, it will
* think that it is a "known" error
* that doesn't need to be checked
* or corrected.
*/
if (rc->rc_error == 0 &&
c >= rr->rr_firstdatacol) {
vdev_raidz_checksum_error(zio,
rc, rc->rc_orig_data);
rc->rc_error =
SET_ERROR(ECKSUM);
}
rc->rc_need_orig_restore = B_FALSE;
}
}
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
if (dbgmsg) {
zfs_dbgmsg("reconstruction successful "
"(checksum verified)");
}
return (0);
}
/* Reconstruction failed - restore original data */
raidz_restore_orig_data(rm);
if (dbgmsg) {
zfs_dbgmsg("raidz_reconstruct_expanded(zio=%px) checksum "
"failed", zio);
}
return (ECKSUM);
}
/*
* Iterate over all combinations of N bad vdevs and attempt a reconstruction.
* Note that the algorithm below is non-optimal because it doesn't take into
* account how reconstruction is actually performed. For example, with
* triple-parity RAID-Z the reconstruction procedure is the same if column 4
* is targeted as invalid as if columns 1 and 4 are targeted since in both
* cases we'd only use parity information in column 0.
*
* The order that we find the various possible combinations of failed
* disks is dictated by these rules:
* - Examine each "slot" (the "i" in tgts[i])
* - Try to increment this slot (tgts[i] += 1)
* - if we can't increment because it runs into the next slot,
* reset our slot to the minimum, and examine the next slot
*
* For example, with a 6-wide RAIDZ3, and no known errors (so we have to choose
* 3 columns to reconstruct), we will generate the following sequence:
*
* STATE ACTION
* 0 1 2 special case: skip since these are all parity
* 0 1 3 first slot: reset to 0; middle slot: increment to 2
* 0 2 3 first slot: increment to 1
* 1 2 3 first: reset to 0; middle: reset to 1; last: increment to 4
* 0 1 4 first: reset to 0; middle: increment to 2
* 0 2 4 first: increment to 1
* 1 2 4 first: reset to 0; middle: increment to 3
* 0 3 4 first: increment to 1
* 1 3 4 first: increment to 2
* 2 3 4 first: reset to 0; middle: reset to 1; last: increment to 5
* 0 1 5 first: reset to 0; middle: increment to 2
* 0 2 5 first: increment to 1
* 1 2 5 first: reset to 0; middle: increment to 3
* 0 3 5 first: increment to 1
* 1 3 5 first: increment to 2
* 2 3 5 first: reset to 0; middle: increment to 4
* 0 4 5 first: increment to 1
* 1 4 5 first: increment to 2
* 2 4 5 first: increment to 3
* 3 4 5 done
*
* This strategy works for dRAID but is less efficient when there are a large
* number of child vdevs and therefore permutations to check. Furthermore,
* since the raidz_map_t rows likely do not overlap, reconstruction would be
* possible as long as there are no more than nparity data errors per row.
* These additional permutations are not currently checked but could be as
* a future improvement.
*
* Returns 0 on success, ECKSUM on failure.
*/
static int
vdev_raidz_combrec(zio_t *zio)
{
int nparity = vdev_get_nparity(zio->io_vd);
raidz_map_t *rm = zio->io_vsd;
int physical_width = zio->io_vd->vdev_children;
int original_width = (rm->rm_original_width != 0) ?
rm->rm_original_width : physical_width;
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
int total_errors = 0;
for (int c = 0; c < rr->rr_cols; c++) {
if (rr->rr_col[c].rc_error)
total_errors++;
}
if (total_errors > nparity)
return (vdev_raidz_worst_error(rr));
}
for (int num_failures = 1; num_failures <= nparity; num_failures++) {
int tstore[VDEV_RAIDZ_MAXPARITY + 2];
int *ltgts = &tstore[1]; /* value is logical child ID */
/*
* Determine number of logical children, n. See comment
* above raidz_simulate_failure().
*/
int n = 0;
for (int w = physical_width;
w >= original_width; w--) {
n += w;
}
ASSERT3U(num_failures, <=, nparity);
ASSERT3U(num_failures, <=, VDEV_RAIDZ_MAXPARITY);
/* Handle corner cases in combrec logic */
ltgts[-1] = -1;
for (int i = 0; i < num_failures; i++) {
ltgts[i] = i;
}
ltgts[num_failures] = n;
for (;;) {
int err = raidz_reconstruct(zio, ltgts, num_failures,
nparity);
if (err == EINVAL) {
/*
* Reconstruction not possible with this number
* of failures; try more failures.
*/
break;
} else if (err == 0)
return (0);
/* Compute next targets to try */
for (int t = 0; ; t++) {
ASSERT3U(t, <, num_failures);
ltgts[t]++;
if (ltgts[t] == n) {
/* try more failures */
ASSERT3U(t, ==, num_failures - 1);
if (zfs_flags &
ZFS_DEBUG_RAIDZ_RECONSTRUCT) {
zfs_dbgmsg("reconstruction "
"failed for num_failures="
"%u; tried all "
"combinations",
num_failures);
}
break;
}
ASSERT3U(ltgts[t], <, n);
ASSERT3U(ltgts[t], <=, ltgts[t + 1]);
/*
* If that spot is available, we're done here.
* Try the next combination.
*/
if (ltgts[t] != ltgts[t + 1])
break; // found next combination
/*
* Otherwise, reset this tgt to the minimum,
* and move on to the next tgt.
*/
ltgts[t] = ltgts[t - 1] + 1;
ASSERT3U(ltgts[t], ==, t);
}
/* Increase the number of failures and keep trying. */
if (ltgts[num_failures - 1] == n)
break;
}
}
if (zfs_flags & ZFS_DEBUG_RAIDZ_RECONSTRUCT)
zfs_dbgmsg("reconstruction failed for all num_failures");
return (ECKSUM);
}
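/*
 * Editor's illustrative sketch (an assumption, not part of the original
 * source or of the ZFS build): a minimal user-space model of the
 * combination "odometer" described above vdev_raidz_combrec(). The
 * hypothetical combrec_enumerate() walks the same sequence of target sets
 * that vdev_raidz_combrec() tries, assuming n logical children and nfail
 * simulated failures (1 <= nfail <= 8). For example, compiled standalone
 * with a main() that calls combrec_enumerate(6, 3), it prints the STATE
 * column of the 6-wide RAIDZ3 table above.
 */
#if 0
#include <stdio.h>

static void
combrec_enumerate(int n, int nfail)
{
	int tstore[8 + 2];
	int *tgts = &tstore[1];

	tgts[-1] = -1;		/* same corner-case guard as ltgts[-1] */
	for (int i = 0; i < nfail; i++)
		tgts[i] = i;
	tgts[nfail] = n;	/* sentinel, as ltgts[num_failures] = n */

	for (;;) {
		for (int i = 0; i < nfail; i++)
			printf("%d ", tgts[i]);
		printf("\n");

		/* Compute the next combination, as in vdev_raidz_combrec(). */
		for (int t = 0; ; t++) {
			tgts[t]++;
			if (tgts[t] == n)
				break;		/* last slot exhausted */
			if (tgts[t] != tgts[t + 1])
				break;		/* found the next combination */
			/* Collision: reset this slot and carry into the next. */
			tgts[t] = tgts[t - 1] + 1;
		}
		if (tgts[nfail - 1] == n)
			break;			/* all combinations tried */
	}
}
#endif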
void
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
for (uint64_t row = 0; row < rm->rm_nrows; row++) {
raidz_row_t *rr = rm->rm_row[row];
vdev_raidz_reconstruct_row(rm, rr, t, nt);
}
}
/*
* Complete a write IO operation on a RAIDZ VDev
*
* Outline:
* 1. Check for errors on the child IOs.
* 2. Return, setting an error code if too few child VDevs were written
* to reconstruct the data later. Note that partial writes are
* considered successful if they can be reconstructed at all.
*/
static void
vdev_raidz_io_done_write_impl(zio_t *zio, raidz_row_t *rr)
{
int normal_errors = 0;
int shadow_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error != 0) {
ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
normal_errors++;
}
if (rc->rc_shadow_error != 0) {
ASSERT(rc->rc_shadow_error != ECKSUM);
shadow_errors++;
}
}
/*
* Treat partial writes as a success. If we couldn't write enough
* columns to reconstruct the data, the I/O failed. Otherwise, good
* enough. Note that in the case of a shadow write (during raidz
* expansion), depending on if we crash, either the normal (old) or
* shadow (new) location may become the "real" version of the block,
* so both locations must have sufficient redundancy.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
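*
* For example (illustrative, not from the original source): on a RAIDZ1
* row (rr_firstdatacol = 1), one failed child write still leaves the row
* reconstructable, but two or more failed columns (normal_errors > 1)
* propagate an error to the zio below.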
*/
if (normal_errors > rr->rr_firstdatacol ||
shadow_errors > rr->rr_firstdatacol) {
zio->io_error = zio_worst_error(zio->io_error,
vdev_raidz_worst_error(rr));
}
}
static void
vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
raidz_row_t *rr)
{
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* If scrubbing and a replacing/sparing child vdev determined
* that not all of its children have an identical copy of the
* data, then clear the error so the column is treated like
* any other read and force a repair to correct the damage.
*/
if (rc->rc_error == ECKSUM) {
ASSERT(zio->io_flags & ZIO_FLAG_SCRUB);
vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
rc->rc_force_repair = 1;
rc->rc_error = 0;
}
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
total_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
}
/*
* If there were data errors and the number of errors we saw was
* correctable -- less than or equal to the number of parity disks read
* -- reconstruct based on the missing data.
*/
if (data_errors != 0 &&
total_errors <= rr->rr_firstdatacol - parity_untried) {
/*
* We either attempt to read all the parity columns or
* none of them. If we didn't try to read parity, we
* wouldn't be here in the correctable case. There must
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
ASSERT(parity_untried == 0);
ASSERT(parity_errors < rr->rr_firstdatacol);
/*
* Identify the data columns that reported an error.
*/
int n = 0;
int tgts[VDEV_RAIDZ_MAXPARITY];
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error != 0) {
ASSERT(n < VDEV_RAIDZ_MAXPARITY);
tgts[n++] = c;
}
}
ASSERT(rr->rr_firstdatacol >= n);
vdev_raidz_reconstruct_row(rm, rr, tgts, n);
}
}
/*
* Return the number of reads issued.
*/
static int
vdev_raidz_read_all(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
int nread = 0;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
/*
* If this row contains empty sectors which are not required
* for a normal read then allocate an ABD for them now so they
* may be read, verified, and any needed repairs performed.
*/
if (rr->rr_nempty != 0 && rr->rr_abd_empty == NULL)
vdev_draid_map_alloc_empty(zio, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_tried || rc->rc_size == 0)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx],
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
nread++;
}
return (nread);
}
/*
* We're here because either there were too many errors to even attempt
* reconstruction (total_errors == rm_first_datacol), or vdev_*_combrec()
* failed. In either case, there is enough bad data to prevent reconstruction.
* Start checksum ereports for all children which haven't failed.
*/
static void
vdev_raidz_io_done_unrecoverable(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx];
if (rc->rc_error != 0)
continue;
zio_bad_cksum_t zbc;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
mutex_enter(&cvd->vdev_stat_lock);
cvd->vdev_stat.vs_checksum_errors++;
mutex_exit(&cvd->vdev_stat_lock);
(void) zfs_ereport_start_checksum(zio->io_spa,
cvd, &zio->io_bookmark, zio, rc->rc_offset,
rc->rc_size, &zbc);
}
}
}
void
vdev_raidz_io_done(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
ASSERT(zio->io_bp != NULL);
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_raidz_io_done_write_impl(zio, rm->rm_row[i]);
}
} else {
if (rm->rm_phys_col) {
/*
* This is an aggregated read. Copy the data and status
* from the aggregate abd's to the individual rows.
*/
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_tried || rc->rc_size == 0)
continue;
raidz_col_t *prc =
&rm->rm_phys_col[rc->rc_devidx];
rc->rc_error = prc->rc_error;
rc->rc_tried = prc->rc_tried;
rc->rc_skipped = prc->rc_skipped;
if (c >= rr->rr_firstdatacol) {
/*
* Note: this is slightly faster
* than using abd_copy_off().
*/
char *physbuf = abd_to_buf(
prc->rc_abd);
void *physloc = physbuf +
rc->rc_offset -
prc->rc_offset;
abd_copy_from_buf(rc->rc_abd,
physloc, rc->rc_size);
}
}
}
}
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_reconstruct_known_missing(zio,
rm, rr);
}
if (raidz_checksum_verify(zio) == 0) {
if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
goto done;
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
} else {
/*
* A sequential resilver has no checksum which makes
* combinatorial reconstruction impossible. This code
* path is unreachable since raidz_checksum_verify()
* has no checksum to verify and must succeed.
*/
ASSERT3U(zio->io_priority, !=, ZIO_PRIORITY_REBUILD);
/*
* This isn't a typical situation -- either we got a
* read error or a child silently returned bad data.
* Read every block so we can try again with as much
* data and parity as we can track down. If we've
* already been through once before, all children will
* be marked as tried so we'll proceed to combinatorial
* reconstruction.
*/
int nread = 0;
for (int i = 0; i < rm->rm_nrows; i++) {
nread += vdev_raidz_read_all(zio,
rm->rm_row[i]);
}
if (nread != 0) {
/*
* Normally our stage is VDEV_IO_DONE, but if
* we've already called redone(), it will have
* changed to VDEV_IO_START, in which case we
* don't want to call redone() again.
*/
if (zio->io_stage != ZIO_STAGE_VDEV_IO_START)
zio_vdev_io_redone(zio);
return;
}
/*
* It would be too expensive to try every possible
* combination of failed sectors in every row, so
* instead we try every combination of failed current or
* past physical disk. This means that if the incorrect
* sectors were all on Nparity disks at any point in the
* past, we will find the correct data. The only known
* case where this is less durable than a non-expanded
* RAIDZ is if we have a silent failure during
* expansion. In that case, one block could be
* partially in the old format and partially in the
* new format, so we'd lose some sectors from the old
* format and some from the new format.
*
* e.g. logical_width=4 physical_width=6
* the 15 (6+5+4) possible failed disks are:
* width=6 child=0
* width=6 child=1
* width=6 child=2
* width=6 child=3
* width=6 child=4
* width=6 child=5
* width=5 child=0
* width=5 child=1
* width=5 child=2
* width=5 child=3
* width=5 child=4
* width=4 child=0
* width=4 child=1
* width=4 child=2
* width=4 child=3
* And we will try every combination of Nparity of these
* failing.
*
* As a first pass, we can generate every combo,
* and try reconstructing, ignoring any known
* failures. If any row has too many known + simulated
* failures, then we bail on reconstructing with this
* number of simulated failures. As an improvement,
* we could detect the number of whole known failures
* (i.e. we have known failures on these disks for
* every row; the disks never succeeded), and
* subtract that from the max # failures to simulate.
* We could go even further like the current
* combrec code, but that doesn't seem like it
* gains us very much. If we simulate a failure
* that is also a known failure, that's fine.
*/
zio->io_error = vdev_raidz_combrec(zio);
if (zio->io_error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
vdev_raidz_io_done_unrecoverable(zio);
}
}
}
done:
if (rm->rm_lr != NULL) {
zfs_rangelock_exit(rm->rm_lr);
rm->rm_lr = NULL;
}
}
static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
if (faulted > vdrz->vd_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered. The function
* assumes that at least one DTL is dirty which implies that full stripe
* width blocks must be resilvered.
*/
static boolean_t
vdev_raidz_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
/*
* If we're in the middle of a RAIDZ expansion, this block may be in
* the old and/or new location. For simplicity, always resilver it.
*/
if (vdrz->vn_vre.vre_state == DSS_SCANNING)
return (B_TRUE);
uint64_t dcols = vd->vdev_children;
uint64_t nparity = vdrz->vd_nparity;
uint64_t ashift = vd->vdev_top->vdev_ashift;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = DVA_GET_OFFSET(dva) >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = ((psize - 1) >> ashift) + 1;
/* The first column for this stripe. */
uint64_t f = b % dcols;
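/*
 * Illustrative example (not from the original source): with dcols=5,
 * nparity=1, ashift=9 and a DVA offset of 10240 bytes, b = 20 and
 * f = 0; a 1024-byte psize gives s = 2, so s + nparity = 3 < dcols
 * and only columns (f + c) % dcols for c < 3 (children 0, 1 and 2)
 * have their DTLs checked below.
 */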
/* Unreachable by sequential resilver. */
ASSERT3U(phys_birth, !=, TXG_UNKNOWN);
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (s + nparity >= dcols)
return (B_TRUE);
for (uint64_t c = 0; c < s + nparity; c++) {
uint64_t devidx = (f + c) % dcols;
vdev_t *cvd = vd->vdev_child[devidx];
/*
* dsl_scan_need_resilver() already checked vd with
* vdev_dtl_contains(). So here just check cvd with
* vdev_dtl_empty(), cheaper and a good approximation.
*/
if (!vdev_dtl_empty(cvd, DTL_PARTIAL))
return (B_TRUE);
}
return (B_FALSE);
}
static void
-vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs, range_seg64_t *remain_rs)
+vdev_raidz_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
+ zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
(void) remain_rs;
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_raidz_ops);
vdev_raidz_t *vdrz = raidvd->vdev_tsd;
if (vdrz->vn_vre.vre_state == DSS_SCANNING) {
/*
* We're in the middle of expansion, in which case the
* translation is in flux. Any answer we give may be wrong
* by the time we return, so it isn't safe for the caller to
* act on it. Therefore we say that this range isn't present
* on any children. The only consumers of this are "zpool
* initialize" and trimming, both of which are "best effort"
* anyway.
*/
physical_rs->rs_start = physical_rs->rs_end = 0;
remain_rs->rs_start = remain_rs->rs_end = 0;
return;
}
uint64_t width = vdrz->vd_physical_width;
uint64_t tgt_col = cvd->vdev_id;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t b_start = logical_rs->rs_start >> ashift;
uint64_t b_end = logical_rs->rs_end >> ashift;
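/*
 * Each child column holds the parent sectors b with b % width == tgt_col,
 * at child row b / width. Illustrative example (not from the original
 * source): with width=4, tgt_col=1 and logical range [b_start=10,
 * b_end=20), this child holds parent sectors 13 and 17, i.e. child rows
 * [3, 5), and the formulas below yield start_row=3 and end_row=5.
 */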
uint64_t start_row = 0;
if (b_start > tgt_col) /* avoid underflow */
start_row = ((b_start - tgt_col - 1) / width) + 1;
uint64_t end_row = 0;
if (b_end > tgt_col)
end_row = ((b_end - tgt_col - 1) / width) + 1;
physical_rs->rs_start = start_row << ashift;
physical_rs->rs_end = end_row << ashift;
ASSERT3U(physical_rs->rs_start, <=, logical_rs->rs_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_rs->rs_end - logical_rs->rs_start);
}
static void
raidz_reflow_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = arg;
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
/*
* Ensure there are no i/os to the range that is being committed.
*/
uint64_t old_offset = RRSS_GET_OFFSET(&spa->spa_uberblock);
ASSERT3U(vre->vre_offset_pertxg[txgoff], >=, old_offset);
mutex_enter(&vre->vre_lock);
uint64_t new_offset =
MIN(vre->vre_offset_pertxg[txgoff], vre->vre_failed_offset);
/*
* We should not have committed anything that failed.
*/
VERIFY3U(vre->vre_failed_offset, >=, old_offset);
mutex_exit(&vre->vre_lock);
zfs_locked_range_t *lr = zfs_rangelock_enter(&vre->vre_rangelock,
old_offset, new_offset - old_offset,
RL_WRITER);
/*
* Update the uberblock that will be written when this txg completes.
*/
RAIDZ_REFLOW_SET(&spa->spa_uberblock,
RRSS_SCRATCH_INVALID_SYNCED_REFLOW, new_offset);
vre->vre_offset_pertxg[txgoff] = 0;
zfs_rangelock_exit(lr);
mutex_enter(&vre->vre_lock);
vre->vre_bytes_copied += vre->vre_bytes_copied_pertxg[txgoff];
vre->vre_bytes_copied_pertxg[txgoff] = 0;
mutex_exit(&vre->vre_lock);
vdev_t *vd = vdev_lookup_top(spa, vre->vre_vdev_id);
VERIFY0(zap_update(spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_BYTES_COPIED,
sizeof (vre->vre_bytes_copied), 1, &vre->vre_bytes_copied, tx));
}
static void
raidz_reflow_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = arg;
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
vdev_t *raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
vdev_raidz_t *vdrz = raidvd->vdev_tsd;
for (int i = 0; i < TXG_SIZE; i++)
VERIFY0(vre->vre_offset_pertxg[i]);
reflow_node_t *re = kmem_zalloc(sizeof (*re), KM_SLEEP);
re->re_txg = tx->tx_txg + TXG_CONCURRENT_STATES;
re->re_logical_width = vdrz->vd_physical_width;
mutex_enter(&vdrz->vd_expand_lock);
avl_add(&vdrz->vd_expand_txgs, re);
mutex_exit(&vdrz->vd_expand_lock);
vdev_t *vd = vdev_lookup_top(spa, vre->vre_vdev_id);
/*
* Dirty the config so that the updated ZPOOL_CONFIG_RAIDZ_EXPAND_TXGS
* will get written (based on vd_expand_txgs).
*/
vdev_config_dirty(vd);
/*
* Before we change vre_state, the on-disk state must reflect that we
* have completed all copying, so that vdev_raidz_io_start() can use
* vre_state to determine if the reflow is in progress. See also the
* end of spa_raidz_expand_thread().
*/
VERIFY3U(RRSS_GET_OFFSET(&spa->spa_ubsync), ==,
raidvd->vdev_ms_count << raidvd->vdev_ms_shift);
vre->vre_end_time = gethrestime_sec();
vre->vre_state = DSS_FINISHED;
uint64_t state = vre->vre_state;
VERIFY0(zap_update(spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_STATE,
sizeof (state), 1, &state, tx));
uint64_t end_time = vre->vre_end_time;
VERIFY0(zap_update(spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_END_TIME,
sizeof (end_time), 1, &end_time, tx));
spa->spa_uberblock.ub_raidz_reflow_info = 0;
spa_history_log_internal(spa, "raidz vdev expansion completed", tx,
"%s vdev %llu new width %llu", spa_name(spa),
(unsigned long long)vd->vdev_id,
(unsigned long long)vd->vdev_children);
spa->spa_raidz_expand = NULL;
raidvd->vdev_rz_expanding = B_FALSE;
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
spa_notify_waiters(spa);
/*
* While we're in syncing context take the opportunity to
* set up a scrub. All the data has been successfully copied
* but we have not validated any checksums.
*/
setup_sync_arg_t setup_sync_arg = {
.func = POOL_SCAN_SCRUB,
.txgstart = 0,
.txgend = 0,
};
if (zfs_scrub_after_expand &&
dsl_scan_setup_check(&setup_sync_arg.func, tx) == 0) {
dsl_scan_setup_sync(&setup_sync_arg, tx);
}
}
/*
* State of one copy batch.
*/
typedef struct raidz_reflow_arg {
vdev_raidz_expand_t *rra_vre; /* Global expansion state. */
zfs_locked_range_t *rra_lr; /* Range lock of this batch. */
uint64_t rra_txg; /* TXG of this batch. */
uint_t rra_ashift; /* Ashift of the vdev. */
uint32_t rra_tbd; /* Number of in-flight ZIOs. */
uint32_t rra_writes; /* Number of write ZIOs. */
zio_t *rra_zio[]; /* Write ZIO pointers. */
} raidz_reflow_arg_t;
/*
* Write of the new location on one child is done. Once all of them are done
* we can unlock and free everything.
*/
static void
raidz_reflow_write_done(zio_t *zio)
{
raidz_reflow_arg_t *rra = zio->io_private;
vdev_raidz_expand_t *vre = rra->rra_vre;
abd_free(zio->io_abd);
mutex_enter(&vre->vre_lock);
if (zio->io_error != 0) {
/* Force a reflow pause on errors */
vre->vre_failed_offset =
MIN(vre->vre_failed_offset, rra->rra_lr->lr_offset);
}
ASSERT3U(vre->vre_outstanding_bytes, >=, zio->io_size);
vre->vre_outstanding_bytes -= zio->io_size;
if (rra->rra_lr->lr_offset + rra->rra_lr->lr_length <
vre->vre_failed_offset) {
vre->vre_bytes_copied_pertxg[rra->rra_txg & TXG_MASK] +=
zio->io_size;
}
cv_signal(&vre->vre_cv);
boolean_t done = (--rra->rra_tbd == 0);
mutex_exit(&vre->vre_lock);
if (!done)
return;
spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
zfs_rangelock_exit(rra->rra_lr);
kmem_free(rra, sizeof (*rra) + sizeof (zio_t *) * rra->rra_writes);
}
/*
* Read of the old location on one child is done. Once all of them are done
* writes should have all the data and we can issue them.
*/
static void
raidz_reflow_read_done(zio_t *zio)
{
raidz_reflow_arg_t *rra = zio->io_private;
vdev_raidz_expand_t *vre = rra->rra_vre;
/* Reads of one block reused the write ABDs; free the gang ABDs of bigger reads. */
if (zio->io_size > (1 << rra->rra_ashift))
abd_free(zio->io_abd);
/*
* If the read failed, or if it was done on a vdev that is not fully
* healthy (e.g. a child that has a resilver in progress), we may not
* have the correct data. Note that it's OK if the write proceeds.
* It may write garbage but the location is otherwise unused and we
* will retry later due to vre_failed_offset.
*/
if (zio->io_error != 0 || !vdev_dtl_empty(zio->io_vd, DTL_MISSING)) {
zfs_dbgmsg("reflow read failed off=%llu size=%llu txg=%llu "
"err=%u partial_dtl_empty=%u missing_dtl_empty=%u",
(long long)rra->rra_lr->lr_offset,
(long long)rra->rra_lr->lr_length,
(long long)rra->rra_txg,
zio->io_error,
vdev_dtl_empty(zio->io_vd, DTL_PARTIAL),
vdev_dtl_empty(zio->io_vd, DTL_MISSING));
mutex_enter(&vre->vre_lock);
/* Force a reflow pause on errors */
vre->vre_failed_offset =
MIN(vre->vre_failed_offset, rra->rra_lr->lr_offset);
mutex_exit(&vre->vre_lock);
}
if (atomic_dec_32_nv(&rra->rra_tbd) > 0)
return;
uint32_t writes = rra->rra_tbd = rra->rra_writes;
for (uint64_t i = 0; i < writes; i++)
zio_nowait(rra->rra_zio[i]);
}
static void
raidz_reflow_record_progress(vdev_raidz_expand_t *vre, uint64_t offset,
dmu_tx_t *tx)
{
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (offset == 0)
return;
mutex_enter(&vre->vre_lock);
ASSERT3U(vre->vre_offset, <=, offset);
vre->vre_offset = offset;
mutex_exit(&vre->vre_lock);
if (vre->vre_offset_pertxg[txgoff] == 0) {
dsl_sync_task_nowait(dmu_tx_pool(tx), raidz_reflow_sync,
spa, tx);
}
vre->vre_offset_pertxg[txgoff] = offset;
}
static boolean_t
vdev_raidz_expand_child_replacing(vdev_t *raidz_vd)
{
for (int i = 0; i < raidz_vd->vdev_children; i++) {
/* Quick check if a child is being replaced */
if (!raidz_vd->vdev_child[i]->vdev_ops->vdev_op_leaf)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
-raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, range_tree_t *rt,
+raidz_reflow_impl(vdev_t *vd, vdev_raidz_expand_t *vre, zfs_range_tree_t *rt,
dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint_t ashift = vd->vdev_top->vdev_ashift;
- range_seg_t *rs = range_tree_first(rt);
+ zfs_range_seg_t *rs = zfs_range_tree_first(rt);
if (rt == NULL)
return (B_FALSE);
- uint64_t offset = rs_get_start(rs, rt);
+ uint64_t offset = zfs_rs_get_start(rs, rt);
ASSERT(IS_P2ALIGNED(offset, 1 << ashift));
- uint64_t size = rs_get_end(rs, rt) - offset;
+ uint64_t size = zfs_rs_get_end(rs, rt) - offset;
ASSERT3U(size, >=, 1 << ashift);
ASSERT(IS_P2ALIGNED(size, 1 << ashift));
uint64_t blkid = offset >> ashift;
uint_t old_children = vd->vdev_children - 1;
/*
* We can only progress to the point that writes will not overlap
* with blocks whose progress has not yet been recorded on disk.
* Since partially-copied rows are still read from the old location,
* we need to stop one row before the sector-wise overlap, to prevent
* row-wise overlap.
*
* Note that even if we are skipping over a large unallocated region,
* we can't move the on-disk progress to `offset`, because concurrent
* writes/allocations could still use the currently-unallocated
* region.
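*
* Illustrative example (not from the original source): with a 5-wide
* vdev (old_children = 4) and a synced offset of 1000 blocks, the
* computation below yields next_overwrite_blkid = 1000 + 1000/4 - 4 =
* 1246, so copying may proceed for blocks below 1246, whose new-layout
* writes land on per-child rows of at most 249, below old-layout row
* 1000/4 = 250, the first row whose copy is not yet persisted; the
* "- old_children" term is the one-row margin described above.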
*/
uint64_t ubsync_blkid =
RRSS_GET_OFFSET(&spa->spa_ubsync) >> ashift;
uint64_t next_overwrite_blkid = ubsync_blkid +
ubsync_blkid / old_children - old_children;
VERIFY3U(next_overwrite_blkid, >, ubsync_blkid);
if (blkid >= next_overwrite_blkid) {
raidz_reflow_record_progress(vre,
next_overwrite_blkid << ashift, tx);
return (B_TRUE);
}
size = MIN(size, raidz_expand_max_copy_bytes);
size = MIN(size, (uint64_t)old_children *
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE));
size = MAX(size, 1 << ashift);
uint_t blocks = MIN(size >> ashift, next_overwrite_blkid - blkid);
size = (uint64_t)blocks << ashift;
- range_tree_remove(rt, offset, size);
+ zfs_range_tree_remove(rt, offset, size);
uint_t reads = MIN(blocks, old_children);
uint_t writes = MIN(blocks, vd->vdev_children);
raidz_reflow_arg_t *rra = kmem_zalloc(sizeof (*rra) +
sizeof (zio_t *) * writes, KM_SLEEP);
rra->rra_vre = vre;
rra->rra_lr = zfs_rangelock_enter(&vre->vre_rangelock,
offset, size, RL_WRITER);
rra->rra_txg = dmu_tx_get_txg(tx);
rra->rra_ashift = ashift;
rra->rra_tbd = reads;
rra->rra_writes = writes;
raidz_reflow_record_progress(vre, offset + size, tx);
/*
* SCL_STATE will be released when the read and write are done,
* by raidz_reflow_write_done().
*/
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/* Check if a replacing vdev was added; if so, treat it as an error. */
if (vdev_raidz_expand_child_replacing(vd)) {
zfs_dbgmsg("replacing vdev encountered, reflow paused at "
"offset=%llu txg=%llu",
(long long)rra->rra_lr->lr_offset,
(long long)rra->rra_txg);
mutex_enter(&vre->vre_lock);
vre->vre_failed_offset =
MIN(vre->vre_failed_offset, rra->rra_lr->lr_offset);
cv_signal(&vre->vre_cv);
mutex_exit(&vre->vre_lock);
/* drop everything we acquired */
spa_config_exit(spa, SCL_STATE, spa);
zfs_rangelock_exit(rra->rra_lr);
kmem_free(rra, sizeof (*rra) + sizeof (zio_t *) * writes);
return (B_TRUE);
}
mutex_enter(&vre->vre_lock);
vre->vre_outstanding_bytes += size;
mutex_exit(&vre->vre_lock);
/* Allocate ABD and ZIO for each child we write. */
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
zio_t *pio = spa->spa_txg_zio[txgoff];
uint_t b = blocks / vd->vdev_children;
uint_t bb = blocks % vd->vdev_children;
for (uint_t i = 0; i < writes; i++) {
uint_t n = b + (i < bb);
abd_t *abd = abd_alloc_for_io(n << ashift, B_FALSE);
rra->rra_zio[i] = zio_vdev_child_io(pio, NULL,
vd->vdev_child[(blkid + i) % vd->vdev_children],
((blkid + i) / vd->vdev_children) << ashift,
abd, n << ashift, ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL, raidz_reflow_write_done, rra);
}
/*
* Allocate and issue ZIO for each child we read. For reads of only
* one block we can use respective writer ABDs, since they will also
* have only one block. For bigger reads create gang ABDs and fill
* them with respective blocks from writer ABDs.
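*
* Illustrative example (not from the original source): with blocks=7,
* old_children=3 and vdev_children=4, the writes above get 2, 2, 2 and
* 1 blocks (b=1, bb=3) while the reads below get 3, 2 and 2 blocks
* (b=2, bb=1); read i gathers logical blocks i, i+3, i+6, ... from the
* corresponding writer ABDs.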
*/
b = blocks / old_children;
bb = blocks % old_children;
for (uint_t i = 0; i < reads; i++) {
uint_t n = b + (i < bb);
abd_t *abd;
if (n > 1) {
abd = abd_alloc_gang();
for (uint_t j = 0; j < n; j++) {
uint_t b = j * old_children + i;
abd_t *cabd = abd_get_offset_size(
rra->rra_zio[b % vd->vdev_children]->io_abd,
(b / vd->vdev_children) << ashift,
1 << ashift);
abd_gang_add(abd, cabd, B_TRUE);
}
} else {
abd = rra->rra_zio[i]->io_abd;
}
zio_nowait(zio_vdev_child_io(pio, NULL,
vd->vdev_child[(blkid + i) % old_children],
((blkid + i) / old_children) << ashift, abd,
n << ashift, ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL, raidz_reflow_read_done, rra));
}
return (B_FALSE);
}
/*
* For testing (ztest specific)
*/
static void
raidz_expand_pause(uint_t pause_point)
{
while (raidz_expand_pause_point != 0 &&
raidz_expand_pause_point <= pause_point)
delay(hz);
}
static void
raidz_scratch_child_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
mutex_enter(&pio->io_lock);
pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
mutex_exit(&pio->io_lock);
}
/*
* Reflow the beginning portion of the vdev into an intermediate scratch area
* in memory and on disk. This operation must be persisted on disk before we
* proceed to overwrite the beginning portion with the reflowed data.
*
* This multi-step task can fail to complete if disk errors are encountered
* and we can return here after a pause (waiting for disk to become healthy).
*/
static void
raidz_reflow_scratch_sync(void *arg, dmu_tx_t *tx)
{
vdev_raidz_expand_t *vre = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
zio_t *pio;
int error;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_t *raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
int ashift = raidvd->vdev_ashift;
uint64_t write_size = P2ALIGN_TYPED(VDEV_BOOT_SIZE, 1 << ashift,
uint64_t);
uint64_t logical_size = write_size * raidvd->vdev_children;
uint64_t read_size =
P2ROUNDUP(DIV_ROUND_UP(logical_size, (raidvd->vdev_children - 1)),
1 << ashift);
/*
* The scratch space must be large enough to get us to the point
* that one row does not overlap itself when moved. This is checked
* by vdev_raidz_attach_check().
*/
VERIFY3U(write_size, >=, raidvd->vdev_children << ashift);
VERIFY3U(write_size, <=, VDEV_BOOT_SIZE);
VERIFY3U(write_size, <=, read_size);
zfs_locked_range_t *lr = zfs_rangelock_enter(&vre->vre_rangelock,
0, logical_size, RL_WRITER);
abd_t **abds = kmem_alloc(raidvd->vdev_children * sizeof (abd_t *),
KM_SLEEP);
for (int i = 0; i < raidvd->vdev_children; i++) {
abds[i] = abd_alloc_linear(read_size, B_FALSE);
}
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_1);
/*
* If we have already written the scratch area then we must read from
* there, since new writes were redirected there while we were paused
* or the original location may have been partially overwritten with
* reflowed data.
*/
if (RRSS_GET_STATE(&spa->spa_ubsync) == RRSS_SCRATCH_VALID) {
VERIFY3U(RRSS_GET_OFFSET(&spa->spa_ubsync), ==, logical_size);
/*
* Read from scratch space.
*/
pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
for (int i = 0; i < raidvd->vdev_children; i++) {
/*
* Note: zio_vdev_child_io() adds VDEV_LABEL_START_SIZE
* to the offset to calculate the physical offset to
* write to. Passing in a negative offset makes us
* access the scratch area.
*/
zio_nowait(zio_vdev_child_io(pio, NULL,
raidvd->vdev_child[i],
VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abds[i],
write_size, ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL, raidz_scratch_child_done, pio));
}
error = zio_wait(pio);
if (error != 0) {
zfs_dbgmsg("reflow: error %d reading scratch location",
error);
goto io_error_exit;
}
goto overwrite;
}
/*
* Read from original location.
*/
pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
for (int i = 0; i < raidvd->vdev_children - 1; i++) {
ASSERT0(vdev_is_dead(raidvd->vdev_child[i]));
zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
0, abds[i], read_size, ZIO_TYPE_READ,
ZIO_PRIORITY_REMOVAL, ZIO_FLAG_CANFAIL,
raidz_scratch_child_done, pio));
}
error = zio_wait(pio);
if (error != 0) {
zfs_dbgmsg("reflow: error %d reading original location", error);
io_error_exit:
for (int i = 0; i < raidvd->vdev_children; i++)
abd_free(abds[i]);
kmem_free(abds, raidvd->vdev_children * sizeof (abd_t *));
zfs_rangelock_exit(lr);
spa_config_exit(spa, SCL_STATE, FTAG);
return;
}
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_2);
/*
* Reflow in memory.
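*
* Each logical sector i moves from old child i % (children - 1), row
* i / (children - 1), to new child i % children, row i / children.
* Illustrative example (not from the original source): when expanding
* from 3 to 4 children, sector i=5 moves from old child 2, row 1 to
* new child 1, row 1.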
*/
uint64_t logical_sectors = logical_size >> ashift;
for (int i = raidvd->vdev_children - 1; i < logical_sectors; i++) {
int oldchild = i % (raidvd->vdev_children - 1);
uint64_t oldoff = (i / (raidvd->vdev_children - 1)) << ashift;
int newchild = i % raidvd->vdev_children;
uint64_t newoff = (i / raidvd->vdev_children) << ashift;
/* a single sector should not be copying over itself */
ASSERT(!(newchild == oldchild && newoff == oldoff));
abd_copy_off(abds[newchild], abds[oldchild],
newoff, oldoff, 1 << ashift);
}
/*
* Verify that we filled in everything we intended to (write_size on
* each child).
*/
VERIFY0(logical_sectors % raidvd->vdev_children);
VERIFY3U((logical_sectors / raidvd->vdev_children) << ashift, ==,
write_size);
/*
* Write to scratch location (boot area).
*/
pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
for (int i = 0; i < raidvd->vdev_children; i++) {
/*
* Note: zio_vdev_child_io() adds VDEV_LABEL_START_SIZE to
* the offset to calculate the physical offset to write to.
* Passing in a negative offset lets us access the boot area.
*/
zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abds[i],
write_size, ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL, raidz_scratch_child_done, pio));
}
error = zio_wait(pio);
if (error != 0) {
zfs_dbgmsg("reflow: error %d writing scratch location", error);
goto io_error_exit;
}
pio = zio_root(spa, NULL, NULL, 0);
zio_flush(pio, raidvd);
zio_wait(pio);
zfs_dbgmsg("reflow: wrote %llu bytes (logical) to scratch area",
(long long)logical_size);
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_3);
/*
* Update uberblock to indicate that scratch space is valid. This is
* needed because after this point, the real location may be
* overwritten. If we crash, we need to get the data from the
* scratch space, rather than the real location.
*
* Note: ub_timestamp is bumped so that vdev_uberblock_compare()
* will prefer this uberblock.
*/
RAIDZ_REFLOW_SET(&spa->spa_ubsync, RRSS_SCRATCH_VALID, logical_size);
spa->spa_ubsync.ub_timestamp++;
ASSERT0(vdev_uberblock_sync_list(&spa->spa_root_vdev, 1,
&spa->spa_ubsync, ZIO_FLAG_CONFIG_WRITER));
if (spa_multihost(spa))
mmp_update_uberblock(spa, &spa->spa_ubsync);
zfs_dbgmsg("reflow: uberblock updated "
"(txg %llu, SCRATCH_VALID, size %llu, ts %llu)",
(long long)spa->spa_ubsync.ub_txg,
(long long)logical_size,
(long long)spa->spa_ubsync.ub_timestamp);
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_SCRATCH_VALID);
/*
* Overwrite with reflow'ed data.
*/
overwrite:
pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
for (int i = 0; i < raidvd->vdev_children; i++) {
zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
0, abds[i], write_size, ZIO_TYPE_WRITE,
ZIO_PRIORITY_REMOVAL, ZIO_FLAG_CANFAIL,
raidz_scratch_child_done, pio));
}
error = zio_wait(pio);
if (error != 0) {
/*
* When we exit early here and drop the range lock, new
* writes will go into the scratch area so we'll need to
* read from there when we return after pausing.
*/
zfs_dbgmsg("reflow: error %d writing real location", error);
/*
* Update the uberblock that is written when this txg completes.
*/
RAIDZ_REFLOW_SET(&spa->spa_uberblock, RRSS_SCRATCH_VALID,
logical_size);
goto io_error_exit;
}
pio = zio_root(spa, NULL, NULL, 0);
zio_flush(pio, raidvd);
zio_wait(pio);
zfs_dbgmsg("reflow: overwrote %llu bytes (logical) to real location",
(long long)logical_size);
for (int i = 0; i < raidvd->vdev_children; i++)
abd_free(abds[i]);
kmem_free(abds, raidvd->vdev_children * sizeof (abd_t *));
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_SCRATCH_REFLOWED);
/*
* Update uberblock to indicate that the initial part has been
* reflow'ed. This is needed because after this point (when we exit
* the rangelock), we allow regular writes to this region, which will
* be written to the new location only (because reflow_offset_next ==
* reflow_offset_synced). If we crashed and re-copied from the
* scratch space, we would lose the regular writes.
*/
RAIDZ_REFLOW_SET(&spa->spa_ubsync, RRSS_SCRATCH_INVALID_SYNCED,
logical_size);
spa->spa_ubsync.ub_timestamp++;
ASSERT0(vdev_uberblock_sync_list(&spa->spa_root_vdev, 1,
&spa->spa_ubsync, ZIO_FLAG_CONFIG_WRITER));
if (spa_multihost(spa))
mmp_update_uberblock(spa, &spa->spa_ubsync);
zfs_dbgmsg("reflow: uberblock updated "
"(txg %llu, SCRATCH_NOT_IN_USE, size %llu, ts %llu)",
(long long)spa->spa_ubsync.ub_txg,
(long long)logical_size,
(long long)spa->spa_ubsync.ub_timestamp);
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_SCRATCH_POST_REFLOW_1);
/*
* Update progress.
*/
vre->vre_offset = logical_size;
zfs_rangelock_exit(lr);
spa_config_exit(spa, SCL_STATE, FTAG);
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
vre->vre_offset_pertxg[txgoff] = vre->vre_offset;
vre->vre_bytes_copied_pertxg[txgoff] = vre->vre_bytes_copied;
/*
* Note - raidz_reflow_sync() will update the uberblock state to
* RRSS_SCRATCH_INVALID_SYNCED_REFLOW
*/
raidz_reflow_sync(spa, tx);
raidz_expand_pause(RAIDZ_EXPAND_PAUSE_SCRATCH_POST_REFLOW_2);
}
/*
* We crashed in the middle of raidz_reflow_scratch_sync(); complete its work
* here. No other i/o can be in progress, so we don't need the vre_rangelock.
*/
void
vdev_raidz_reflow_copy_scratch(spa_t *spa)
{
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
uint64_t logical_size = RRSS_GET_OFFSET(&spa->spa_uberblock);
ASSERT3U(RRSS_GET_STATE(&spa->spa_uberblock), ==, RRSS_SCRATCH_VALID);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_t *raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
ASSERT0(logical_size % raidvd->vdev_children);
uint64_t write_size = logical_size / raidvd->vdev_children;
zio_t *pio;
/*
* Read from scratch space.
*/
abd_t **abds = kmem_alloc(raidvd->vdev_children * sizeof (abd_t *),
KM_SLEEP);
for (int i = 0; i < raidvd->vdev_children; i++) {
abds[i] = abd_alloc_linear(write_size, B_FALSE);
}
pio = zio_root(spa, NULL, NULL, 0);
for (int i = 0; i < raidvd->vdev_children; i++) {
/*
* Note: zio_vdev_child_io() adds VDEV_LABEL_START_SIZE to
* the offset to calculate the physical offset to write to.
* Passing in a negative offset lets us access the boot area.
*/
zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abds[i],
write_size, ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL, 0,
raidz_scratch_child_done, pio));
}
zio_wait(pio);
/*
* Overwrite real location with reflow'ed data.
*/
pio = zio_root(spa, NULL, NULL, 0);
for (int i = 0; i < raidvd->vdev_children; i++) {
zio_nowait(zio_vdev_child_io(pio, NULL, raidvd->vdev_child[i],
0, abds[i], write_size, ZIO_TYPE_WRITE,
ZIO_PRIORITY_REMOVAL, 0,
raidz_scratch_child_done, pio));
}
zio_wait(pio);
pio = zio_root(spa, NULL, NULL, 0);
zio_flush(pio, raidvd);
zio_wait(pio);
zfs_dbgmsg("reflow recovery: overwrote %llu bytes (logical) "
"to real location", (long long)logical_size);
for (int i = 0; i < raidvd->vdev_children; i++)
abd_free(abds[i]);
kmem_free(abds, raidvd->vdev_children * sizeof (abd_t *));
/*
* Update uberblock.
*/
RAIDZ_REFLOW_SET(&spa->spa_ubsync,
RRSS_SCRATCH_INVALID_SYNCED_ON_IMPORT, logical_size);
spa->spa_ubsync.ub_timestamp++;
VERIFY0(vdev_uberblock_sync_list(&spa->spa_root_vdev, 1,
&spa->spa_ubsync, ZIO_FLAG_CONFIG_WRITER));
if (spa_multihost(spa))
mmp_update_uberblock(spa, &spa->spa_ubsync);
zfs_dbgmsg("reflow recovery: uberblock updated "
"(txg %llu, SCRATCH_NOT_IN_USE, size %llu, ts %llu)",
(long long)spa->spa_ubsync.ub_txg,
(long long)logical_size,
(long long)spa->spa_ubsync.ub_timestamp);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
spa_first_txg(spa));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
vre->vre_offset = logical_size;
vre->vre_offset_pertxg[txgoff] = vre->vre_offset;
vre->vre_bytes_copied_pertxg[txgoff] = vre->vre_bytes_copied;
/*
* Note that raidz_reflow_sync() will update the uberblock once more
*/
raidz_reflow_sync(spa, tx);
dmu_tx_commit(tx);
spa_config_exit(spa, SCL_STATE, FTAG);
}
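/*
 * zthr checker function: run the expansion thread only while an expansion
 * exists and it is not waiting for a resilver to complete.
 */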
static boolean_t
spa_raidz_expand_thread_check(void *arg, zthr_t *zthr)
{
(void) zthr;
spa_t *spa = arg;
return (spa->spa_raidz_expand != NULL &&
!spa->spa_raidz_expand->vre_waiting_for_resilver);
}
/*
* RAIDZ expansion background thread
*
* Can be called multiple times if the reflow is paused
*/
static void
spa_raidz_expand_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
if (RRSS_GET_STATE(&spa->spa_ubsync) == RRSS_SCRATCH_VALID)
vre->vre_offset = 0;
else
vre->vre_offset = RRSS_GET_OFFSET(&spa->spa_ubsync);
/* Reflow the beginning portion using the scratch area */
if (vre->vre_offset == 0) {
VERIFY0(dsl_sync_task(spa_name(spa),
NULL, raidz_reflow_scratch_sync,
vre, 0, ZFS_SPACE_CHECK_NONE));
/* if we encountered errors then pause */
if (vre->vre_offset == 0) {
mutex_enter(&vre->vre_lock);
vre->vre_waiting_for_resilver = B_TRUE;
mutex_exit(&vre->vre_lock);
return;
}
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_t *raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
uint64_t guid = raidvd->vdev_guid;
/* Iterate over all the remaining metaslabs */
for (uint64_t i = vre->vre_offset >> raidvd->vdev_ms_shift;
i < raidvd->vdev_ms_count &&
!zthr_iscancelled(zthr) &&
vre->vre_failed_offset == UINT64_MAX; i++) {
metaslab_t *msp = raidvd->vdev_ms[i];
metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
/*
* The metaslab may be newly created (for the expanded
* space), in which case its trees won't exist yet,
* so we need to bail out early.
*/
if (msp->ms_new) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
VERIFY0(metaslab_load(msp));
/*
* We want to copy everything except the free (allocatable)
* space. Note that there may be a little bit more free
* space (e.g. in ms_defer), and it's fine to copy that too.
*/
uint64_t shift, start;
- range_seg_type_t type = metaslab_calculate_range_tree_type(
+ zfs_range_seg_type_t type = metaslab_calculate_range_tree_type(
raidvd, msp, &start, &shift);
- range_tree_t *rt = range_tree_create(NULL, type, NULL,
+ zfs_range_tree_t *rt = zfs_range_tree_create(NULL, type, NULL,
start, shift);
- range_tree_add(rt, msp->ms_start, msp->ms_size);
- range_tree_walk(msp->ms_allocatable, range_tree_remove, rt);
+ zfs_range_tree_add(rt, msp->ms_start, msp->ms_size);
+ zfs_range_tree_walk(msp->ms_allocatable, zfs_range_tree_remove,
+ rt);
mutex_exit(&msp->ms_lock);
/*
* Force the last sector of each metaslab to be copied. This
* ensures that we advance the on-disk progress to the end of
* this metaslab while the metaslab is disabled. Otherwise, we
* could move past this metaslab without advancing the on-disk
* progress, and then an allocation to this metaslab would not
* be copied.
*/
int sectorsz = 1 << raidvd->vdev_ashift;
uint64_t ms_last_offset = msp->ms_start +
msp->ms_size - sectorsz;
- if (!range_tree_contains(rt, ms_last_offset, sectorsz)) {
- range_tree_add(rt, ms_last_offset, sectorsz);
+ if (!zfs_range_tree_contains(rt, ms_last_offset, sectorsz)) {
+ zfs_range_tree_add(rt, ms_last_offset, sectorsz);
}
/*
* When we are resuming from a paused expansion (i.e.
* when importing a pool with an expansion in progress),
* discard any state that we have already processed.
*/
if (vre->vre_offset > msp->ms_start) {
- range_tree_clear(rt, msp->ms_start,
+ zfs_range_tree_clear(rt, msp->ms_start,
vre->vre_offset - msp->ms_start);
}
while (!zthr_iscancelled(zthr) &&
- !range_tree_is_empty(rt) &&
+ !zfs_range_tree_is_empty(rt) &&
vre->vre_failed_offset == UINT64_MAX) {
/*
* We need to periodically drop the config lock so that
* writers can get in. Additionally, we can't wait
* for a txg to sync while holding a config lock
* (since a waiting writer could cause a 3-way deadlock
* with the sync thread, which also gets a config
* lock for reader). So we can't hold the config lock
* while calling dmu_tx_assign().
*/
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* If requested, pause the reflow when the amount
* specified by raidz_expand_max_reflow_bytes is reached
*
* This pause is only used during testing or debugging.
*/
while (raidz_expand_max_reflow_bytes != 0 &&
raidz_expand_max_reflow_bytes <=
vre->vre_bytes_copied && !zthr_iscancelled(zthr)) {
delay(hz);
}
mutex_enter(&vre->vre_lock);
while (vre->vre_outstanding_bytes >
raidz_expand_max_copy_bytes) {
cv_wait(&vre->vre_cv, &vre->vre_lock);
}
mutex_exit(&vre->vre_lock);
dmu_tx_t *tx =
dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
/*
* Reacquire the vdev_config lock. Theoretically, the
* vdev_t that we're expanding may have changed.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
boolean_t needsync =
raidz_reflow_impl(raidvd, vre, rt, tx);
dmu_tx_commit(tx);
if (needsync) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
txg_wait_synced(spa->spa_dsl_pool, txg);
spa_config_enter(spa, SCL_CONFIG, FTAG,
RW_READER);
}
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_enable(msp, B_FALSE, B_FALSE);
- range_tree_vacate(rt, NULL, NULL);
- range_tree_destroy(rt);
+ zfs_range_tree_vacate(rt, NULL, NULL);
+ zfs_range_tree_destroy(rt);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
raidvd = vdev_lookup_top(spa, vre->vre_vdev_id);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* The txg_wait_synced() here ensures that all reflow zio's have
* completed, and vre_failed_offset has been set if necessary. It
* also ensures that the progress of the last raidz_reflow_sync() is
* written to disk before raidz_reflow_complete_sync() changes the
* in-memory vre_state. vdev_raidz_io_start() uses vre_state to
* determine if a reflow is in progress, in which case we may need to
* write to both old and new locations. Therefore we can only change
* vre_state once this is not necessary, which is once the on-disk
* progress (in spa_ubsync) has been set past any possible writes (to
* the end of the last metaslab).
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
if (!zthr_iscancelled(zthr) &&
vre->vre_offset == raidvd->vdev_ms_count << raidvd->vdev_ms_shift) {
/*
* We are not being canceled or paused, so the reflow must be
* complete. In that case also mark it as completed on disk.
*/
ASSERT3U(vre->vre_failed_offset, ==, UINT64_MAX);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
raidz_reflow_complete_sync, spa,
0, ZFS_SPACE_CHECK_NONE));
(void) vdev_online(spa, guid, ZFS_ONLINE_EXPAND, NULL);
} else {
/*
* Wait for all copy zio's to complete and for all the
* raidz_reflow_sync() synctasks to be run.
*/
spa_history_log_internal(spa, "reflow pause",
NULL, "offset=%llu failed_offset=%lld",
(long long)vre->vre_offset,
(long long)vre->vre_failed_offset);
mutex_enter(&vre->vre_lock);
if (vre->vre_failed_offset != UINT64_MAX) {
/*
* Reset progress so that we will retry everything
* after the point that something failed.
*/
vre->vre_offset = vre->vre_failed_offset;
vre->vre_failed_offset = UINT64_MAX;
vre->vre_waiting_for_resilver = B_TRUE;
}
mutex_exit(&vre->vre_lock);
}
}
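/*
 * Create the background zthr that performs the RAIDZ expansion reflow.
 */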
void
spa_start_raidz_expansion_thread(spa_t *spa)
{
ASSERT3P(spa->spa_raidz_expand_zthr, ==, NULL);
spa->spa_raidz_expand_zthr = zthr_create("raidz_expand",
spa_raidz_expand_thread_check, spa_raidz_expand_thread,
spa, defclsyspri);
}
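/*
 * Called when DTLs are reassessed. If the expanding vdev was waiting for a
 * resilver to complete, wake the expansion thread so the reflow can resume.
 */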
void
raidz_dtl_reassessed(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
if (spa->spa_raidz_expand != NULL) {
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
/*
* We get called often from vdev_dtl_reassess(), so make
* sure it's our vdev and that any child replacement has completed.
*/
if (vd->vdev_top->vdev_id == vre->vre_vdev_id &&
!vdev_raidz_expand_child_replacing(vd->vdev_top)) {
mutex_enter(&vre->vre_lock);
if (vre->vre_waiting_for_resilver) {
vdev_dbgmsg(vd, "DTL reassessed, "
"continuing raidz expansion");
vre->vre_waiting_for_resilver = B_FALSE;
zthr_wakeup(spa->spa_raidz_expand_zthr);
}
mutex_exit(&vre->vre_lock);
}
}
}
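/*
 * Check whether attaching a new child for expansion is allowed: the boot
 * area used as scratch space must be large enough for the widened vdev.
 */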
int
vdev_raidz_attach_check(vdev_t *new_child)
{
vdev_t *raidvd = new_child->vdev_parent;
uint64_t new_children = raidvd->vdev_children;
/*
* We use the "boot" space as scratch space to handle overwriting the
* initial part of the vdev. If it is too small, then this expansion
* is not allowed. This would be very unusual (e.g. ashift > 13 and
* >200 children).
*/
if (new_children << raidvd->vdev_ashift > VDEV_BOOT_SIZE) {
return (EINVAL);
}
return (0);
}
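/*
 * Sync task that records the start of an expansion: bump the physical width,
 * persist the expansion state in the vdev's top ZAP, and wake the zthr.
 */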
void
vdev_raidz_attach_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *new_child = arg;
spa_t *spa = new_child->vdev_spa;
vdev_t *raidvd = new_child->vdev_parent;
vdev_raidz_t *vdrz = raidvd->vdev_tsd;
ASSERT3P(raidvd->vdev_ops, ==, &vdev_raidz_ops);
ASSERT3P(raidvd->vdev_top, ==, raidvd);
ASSERT3U(raidvd->vdev_children, >, vdrz->vd_original_width);
ASSERT3U(raidvd->vdev_children, ==, vdrz->vd_physical_width + 1);
ASSERT3P(raidvd->vdev_child[raidvd->vdev_children - 1], ==,
new_child);
spa_feature_incr(spa, SPA_FEATURE_RAIDZ_EXPANSION, tx);
vdrz->vd_physical_width++;
VERIFY0(spa->spa_uberblock.ub_raidz_reflow_info);
vdrz->vn_vre.vre_vdev_id = raidvd->vdev_id;
vdrz->vn_vre.vre_offset = 0;
vdrz->vn_vre.vre_failed_offset = UINT64_MAX;
spa->spa_raidz_expand = &vdrz->vn_vre;
zthr_wakeup(spa->spa_raidz_expand_zthr);
/*
* Dirty the config so that ZPOOL_CONFIG_RAIDZ_EXPANDING will get
* written to the config.
*/
vdev_config_dirty(raidvd);
vdrz->vn_vre.vre_start_time = gethrestime_sec();
vdrz->vn_vre.vre_end_time = 0;
vdrz->vn_vre.vre_state = DSS_SCANNING;
vdrz->vn_vre.vre_bytes_copied = 0;
uint64_t state = vdrz->vn_vre.vre_state;
VERIFY0(zap_update(spa->spa_meta_objset,
raidvd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_STATE,
sizeof (state), 1, &state, tx));
uint64_t start_time = vdrz->vn_vre.vre_start_time;
VERIFY0(zap_update(spa->spa_meta_objset,
raidvd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_START_TIME,
sizeof (start_time), 1, &start_time, tx));
(void) zap_remove(spa->spa_meta_objset,
raidvd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_END_TIME, tx);
(void) zap_remove(spa->spa_meta_objset,
raidvd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_BYTES_COPIED, tx);
spa_history_log_internal(spa, "raidz vdev expansion started", tx,
"%s vdev %llu new width %llu", spa_name(spa),
(unsigned long long)raidvd->vdev_id,
(unsigned long long)raidvd->vdev_children);
}
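/*
 * Load the persisted expansion state (state, start/end time, bytes copied)
 * from the vdev's top ZAP, if present.
 */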
int
vdev_raidz_load(vdev_t *vd)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
int err;
uint64_t state = DSS_NONE;
uint64_t start_time = 0;
uint64_t end_time = 0;
uint64_t bytes_copied = 0;
if (vd->vdev_top_zap != 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_STATE,
sizeof (state), 1, &state);
if (err != 0 && err != ENOENT)
return (err);
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_START_TIME,
sizeof (start_time), 1, &start_time);
if (err != 0 && err != ENOENT)
return (err);
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_END_TIME,
sizeof (end_time), 1, &end_time);
if (err != 0 && err != ENOENT)
return (err);
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_RAIDZ_EXPAND_BYTES_COPIED,
sizeof (bytes_copied), 1, &bytes_copied);
if (err != 0 && err != ENOENT)
return (err);
}
/*
* If we are in the middle of expansion, vre_state should have
* already been set by vdev_raidz_init().
*/
EQUIV(vdrz->vn_vre.vre_state == DSS_SCANNING, state == DSS_SCANNING);
vdrz->vn_vre.vre_state = (dsl_scan_state_t)state;
vdrz->vn_vre.vre_start_time = start_time;
vdrz->vn_vre.vre_end_time = end_time;
vdrz->vn_vre.vre_bytes_copied = bytes_copied;
return (0);
}
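/*
 * Report expansion statistics for the in-progress expansion, or for the most
 * recently completed one if none is active.
 */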
int
spa_raidz_expand_get_stats(spa_t *spa, pool_raidz_expand_stat_t *pres)
{
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
if (vre == NULL) {
/* no removal in progress; find most recent completed */
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[c];
if (vd->vdev_ops == &vdev_raidz_ops) {
vdev_raidz_t *vdrz = vd->vdev_tsd;
if (vdrz->vn_vre.vre_end_time != 0 &&
(vre == NULL ||
vdrz->vn_vre.vre_end_time >
vre->vre_end_time)) {
vre = &vdrz->vn_vre;
}
}
}
}
if (vre == NULL) {
return (SET_ERROR(ENOENT));
}
pres->pres_state = vre->vre_state;
pres->pres_expanding_vdev = vre->vre_vdev_id;
vdev_t *vd = vdev_lookup_top(spa, vre->vre_vdev_id);
pres->pres_to_reflow = vd->vdev_stat.vs_alloc;
mutex_enter(&vre->vre_lock);
pres->pres_reflowed = vre->vre_bytes_copied;
for (int i = 0; i < TXG_SIZE; i++)
pres->pres_reflowed += vre->vre_bytes_copied_pertxg[i];
mutex_exit(&vre->vre_lock);
pres->pres_start_time = vre->vre_start_time;
pres->pres_end_time = vre->vre_end_time;
pres->pres_waiting_for_resilver = vre->vre_waiting_for_resilver;
return (0);
}
/*
* Initialize private RAIDZ specific fields from the nvlist.
*/
static int
vdev_raidz_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
uint_t children;
nvlist_t **child;
int error = nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children);
if (error != 0)
return (SET_ERROR(EINVAL));
uint64_t nparity;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* devices.
*/
if (nparity > 1 && spa_version(spa) < SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
else if (nparity > 2 && spa_version(spa) < SPA_VERSION_RAIDZ3)
return (SET_ERROR(EINVAL));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
nparity = 1;
}
vdev_raidz_t *vdrz = kmem_zalloc(sizeof (*vdrz), KM_SLEEP);
vdrz->vn_vre.vre_vdev_id = -1;
vdrz->vn_vre.vre_offset = UINT64_MAX;
vdrz->vn_vre.vre_failed_offset = UINT64_MAX;
mutex_init(&vdrz->vn_vre.vre_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vdrz->vn_vre.vre_cv, NULL, CV_DEFAULT, NULL);
zfs_rangelock_init(&vdrz->vn_vre.vre_rangelock, NULL, NULL);
mutex_init(&vdrz->vd_expand_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&vdrz->vd_expand_txgs, vdev_raidz_reflow_compare,
sizeof (reflow_node_t), offsetof(reflow_node_t, re_link));
vdrz->vd_physical_width = children;
vdrz->vd_nparity = nparity;
/* note, the ID does not exist when creating a pool */
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
&vdrz->vn_vre.vre_vdev_id);
boolean_t reflow_in_progress =
nvlist_exists(nv, ZPOOL_CONFIG_RAIDZ_EXPANDING);
if (reflow_in_progress) {
spa->spa_raidz_expand = &vdrz->vn_vre;
vdrz->vn_vre.vre_state = DSS_SCANNING;
}
vdrz->vd_original_width = children;
uint64_t *txgs;
unsigned int txgs_size = 0;
error = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_RAIDZ_EXPAND_TXGS,
&txgs, &txgs_size);
if (error == 0) {
for (int i = 0; i < txgs_size; i++) {
reflow_node_t *re = kmem_zalloc(sizeof (*re), KM_SLEEP);
re->re_txg = txgs[txgs_size - i - 1];
re->re_logical_width = vdrz->vd_physical_width - i;
if (reflow_in_progress)
re->re_logical_width--;
avl_add(&vdrz->vd_expand_txgs, re);
}
vdrz->vd_original_width = vdrz->vd_physical_width - txgs_size;
}
if (reflow_in_progress) {
vdrz->vd_original_width--;
zfs_dbgmsg("reflow_in_progress, %u wide, %d prior expansions",
children, txgs_size);
}
*tsd = vdrz;
return (0);
}
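/*
 * Release the RAIDZ specific state allocated by vdev_raidz_init().
 */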
static void
vdev_raidz_fini(vdev_t *vd)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
if (vd->vdev_spa->spa_raidz_expand == &vdrz->vn_vre)
vd->vdev_spa->spa_raidz_expand = NULL;
reflow_node_t *re;
void *cookie = NULL;
avl_tree_t *tree = &vdrz->vd_expand_txgs;
while ((re = avl_destroy_nodes(tree, &cookie)) != NULL)
kmem_free(re, sizeof (*re));
avl_destroy(&vdrz->vd_expand_txgs);
mutex_destroy(&vdrz->vd_expand_lock);
mutex_destroy(&vdrz->vn_vre.vre_lock);
cv_destroy(&vdrz->vn_vre.vre_cv);
zfs_rangelock_fini(&vdrz->vn_vre.vre_rangelock);
kmem_free(vdrz, sizeof (*vdrz));
}
/*
* Add RAIDZ specific fields to the config nvlist.
*/
static void
vdev_raidz_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_raidz_ops);
vdev_raidz_t *vdrz = vd->vdev_tsd;
/*
* Make sure someone hasn't managed to sneak a fancy new vdev
* into a crufty old storage pool.
*/
ASSERT(vdrz->vd_nparity == 1 ||
(vdrz->vd_nparity <= 2 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ2) ||
(vdrz->vd_nparity <= 3 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ3));
/*
* Note that we'll add these even on storage pools where they
* aren't strictly required -- older software will just ignore
* them.
*/
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdrz->vd_nparity);
if (vdrz->vn_vre.vre_state == DSS_SCANNING) {
fnvlist_add_boolean(nv, ZPOOL_CONFIG_RAIDZ_EXPANDING);
}
mutex_enter(&vdrz->vd_expand_lock);
if (!avl_is_empty(&vdrz->vd_expand_txgs)) {
uint64_t count = avl_numnodes(&vdrz->vd_expand_txgs);
uint64_t *txgs = kmem_alloc(sizeof (uint64_t) * count,
KM_SLEEP);
uint64_t i = 0;
for (reflow_node_t *re = avl_first(&vdrz->vd_expand_txgs);
re != NULL; re = AVL_NEXT(&vdrz->vd_expand_txgs, re)) {
txgs[i++] = re->re_txg;
}
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_RAIDZ_EXPAND_TXGS,
txgs, count);
kmem_free(txgs, sizeof (uint64_t) * count);
}
mutex_exit(&vdrz->vd_expand_lock);
}
static uint64_t
vdev_raidz_nparity(vdev_t *vd)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
return (vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_ndisks(vdev_t *vd)
{
return (vd->vdev_children);
}
vdev_ops_t vdev_raidz_ops = {
.vdev_op_init = vdev_raidz_init,
.vdev_op_fini = vdev_raidz_fini,
.vdev_op_open = vdev_raidz_open,
.vdev_op_close = vdev_raidz_close,
.vdev_op_asize = vdev_raidz_asize,
.vdev_op_min_asize = vdev_raidz_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_raidz_io_start,
.vdev_op_io_done = vdev_raidz_io_done,
.vdev_op_state_change = vdev_raidz_state_change,
.vdev_op_need_resilver = vdev_raidz_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_raidz_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_raidz_config_generate,
.vdev_op_nparity = vdev_raidz_nparity,
.vdev_op_ndisks = vdev_raidz_ndisks,
.vdev_op_type = VDEV_TYPE_RAIDZ, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
ZFS_MODULE_PARAM(zfs_vdev, raidz_, expand_max_reflow_bytes, ULONG, ZMOD_RW,
"For testing, pause RAIDZ expansion after reflowing this many bytes");
ZFS_MODULE_PARAM(zfs_vdev, raidz_, expand_max_copy_bytes, ULONG, ZMOD_RW,
"Max amount of concurrent i/o for RAIDZ expansion");
ZFS_MODULE_PARAM(zfs_vdev, raidz_, io_aggregate_rows, ULONG, ZMOD_RW,
"For expanded RAIDZ, aggregate reads that have more rows than this");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_after_expand, INT, ZMOD_RW,
"For expanded RAIDZ, automatically start a pool scrub when expansion "
"completes");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c b/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c
index e12b96170f55..340d32b61bf8 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c
@@ -1,673 +1,664 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/simd.h>
#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/debug.h>
#include <sys/zfs_debug.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
/* Opaque implementation with NULL methods to represent original methods */
static const raidz_impl_ops_t vdev_raidz_original_impl = {
.name = "original",
.is_supported = raidz_will_scalar_work,
};
/* RAIDZ parity op that contains the fastest methods */
static raidz_impl_ops_t vdev_raidz_fastest_impl = {
.name = "fastest"
};
/* All compiled in implementations */
static const raidz_impl_ops_t *const raidz_all_maths[] = {
&vdev_raidz_original_impl,
&vdev_raidz_scalar_impl,
#if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */
&vdev_raidz_sse2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3) /* only x86_64 for now */
&vdev_raidz_ssse3_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2) /* only x86_64 for now */
&vdev_raidz_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) /* only x86_64 for now */
&vdev_raidz_avx512f_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */
&vdev_raidz_avx512bw_impl,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)
&vdev_raidz_aarch64_neon_impl,
&vdev_raidz_aarch64_neonx2_impl,
#endif
#if defined(__powerpc__) && defined(__altivec__)
&vdev_raidz_powerpc_altivec_impl,
#endif
};
/* Indicate that benchmark has been completed */
static boolean_t raidz_math_initialized = B_FALSE;
/* Select raidz implementation */
#define IMPL_FASTEST (UINT32_MAX)
#define IMPL_CYCLE (UINT32_MAX - 1)
#define IMPL_ORIGINAL (0)
#define IMPL_SCALAR (1)
#define RAIDZ_IMPL_READ(i) (*(volatile uint32_t *) &(i))
-static uint32_t zfs_vdev_raidz_impl = IMPL_SCALAR;
+uint32_t zfs_vdev_raidz_impl = IMPL_SCALAR;
static uint32_t user_sel_impl = IMPL_FASTEST;
/* Hold all supported implementations */
static size_t raidz_supp_impl_cnt = 0;
static raidz_impl_ops_t *raidz_supp_impl[ARRAY_SIZE(raidz_all_maths)];
#if defined(_KERNEL)
/*
* kstat values for supported implementations.
* Values represent per-disk throughput of an 8 disk+parity raidz vdev [B/s]
*/
static raidz_impl_kstat_t raidz_impl_kstats[ARRAY_SIZE(raidz_all_maths) + 1];
/* kstat for benchmarked implementations */
static kstat_t *raidz_math_kstat = NULL;
#endif
/*
* Returns the RAIDZ operations for raidz_map() parity calculations. When
* a SIMD implementation is not allowed in the current context, fall back
* to the fastest generic implementation.
*/
const raidz_impl_ops_t *
vdev_raidz_math_get_ops(void)
{
if (!kfpu_allowed())
return (&vdev_raidz_scalar_impl);
raidz_impl_ops_t *ops = NULL;
const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);
switch (impl) {
case IMPL_FASTEST:
ASSERT(raidz_math_initialized);
ops = &vdev_raidz_fastest_impl;
break;
case IMPL_CYCLE:
/* Cycle through all supported implementations */
ASSERT(raidz_math_initialized);
ASSERT3U(raidz_supp_impl_cnt, >, 0);
static size_t cycle_impl_idx = 0;
size_t idx = (++cycle_impl_idx) % raidz_supp_impl_cnt;
ops = raidz_supp_impl[idx];
break;
case IMPL_ORIGINAL:
ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl;
break;
case IMPL_SCALAR:
ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl;
break;
default:
ASSERT3U(impl, <, raidz_supp_impl_cnt);
ASSERT3U(raidz_supp_impl_cnt, >, 0);
if (impl < ARRAY_SIZE(raidz_all_maths))
ops = raidz_supp_impl[impl];
break;
}
ASSERT3P(ops, !=, NULL);
return (ops);
}
/*
* Select parity generation method for raidz_map
*/
int
vdev_raidz_math_generate(raidz_map_t *rm, raidz_row_t *rr)
{
raidz_gen_f gen_parity = NULL;
switch (raidz_parity(rm)) {
case 1:
gen_parity = rm->rm_ops->gen[RAIDZ_GEN_P];
break;
case 2:
gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQ];
break;
case 3:
gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQR];
break;
default:
gen_parity = NULL;
cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
(u_longlong_t)raidz_parity(rm));
break;
}
/* if method is NULL execute the original implementation */
if (gen_parity == NULL)
return (RAIDZ_ORIGINAL_IMPL);
gen_parity(rr);
return (0);
}
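/*
 * Helpers that select a reconstruction method based on which parity columns
 * are valid and how many data columns failed.
 */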
static raidz_rec_f
reconstruct_fun_p_sel(raidz_map_t *rm, const int *parity_valid,
const int nbaddata)
{
if (nbaddata == 1 && parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
}
return ((raidz_rec_f) NULL);
}
static raidz_rec_f
reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
const int nbaddata)
{
if (nbaddata == 1) {
if (parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
} else if (parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_Q]);
}
} else if (nbaddata == 2 &&
parity_valid[CODE_P] && parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
}
return ((raidz_rec_f) NULL);
}
static raidz_rec_f
reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
const int nbaddata)
{
if (nbaddata == 1) {
if (parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
} else if (parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_Q]);
} else if (parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_R]);
}
} else if (nbaddata == 2) {
if (parity_valid[CODE_P] && parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
} else if (parity_valid[CODE_P] && parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_PR]);
} else if (parity_valid[CODE_Q] && parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_QR]);
}
} else if (nbaddata == 3 &&
parity_valid[CODE_P] && parity_valid[CODE_Q] &&
parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQR]);
}
return ((raidz_rec_f) NULL);
}
/*
* Select data reconstruction method for raidz_map
* @parity_valid - Parity validity flag
* @dt - Failed data index array
* @nbaddata - Number of failed data columns
*/
int
vdev_raidz_math_reconstruct(raidz_map_t *rm, raidz_row_t *rr,
const int *parity_valid, const int *dt, const int nbaddata)
{
raidz_rec_f rec_fn = NULL;
switch (raidz_parity(rm)) {
case PARITY_P:
rec_fn = reconstruct_fun_p_sel(rm, parity_valid, nbaddata);
break;
case PARITY_PQ:
rec_fn = reconstruct_fun_pq_sel(rm, parity_valid, nbaddata);
break;
case PARITY_PQR:
rec_fn = reconstruct_fun_pqr_sel(rm, parity_valid, nbaddata);
break;
default:
cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
(u_longlong_t)raidz_parity(rm));
break;
}
if (rec_fn == NULL)
return (RAIDZ_ORIGINAL_IMPL);
else
return (rec_fn(rr, dt));
}
const char *const raidz_gen_name[] = {
"gen_p", "gen_pq", "gen_pqr"
};
const char *const raidz_rec_name[] = {
"rec_p", "rec_q", "rec_r",
"rec_pq", "rec_pr", "rec_qr", "rec_pqr"
};
#if defined(_KERNEL)
#define RAIDZ_KSTAT_LINE_LEN (17 + 10*12 + 1)
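/* Format the header row of the vdev_raidz_bench kstat. */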
static int
raidz_math_kstat_headers(char *buf, size_t size)
{
ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);
ssize_t off = kmem_scnprintf(buf, size, "%-17s", "implementation");
for (int i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
off += kmem_scnprintf(buf + off, size - off, "%-16s",
raidz_gen_name[i]);
for (int i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
off += kmem_scnprintf(buf + off, size - off, "%-16s",
raidz_rec_name[i]);
(void) kmem_scnprintf(buf + off, size - off, "\n");
return (0);
}
static int
raidz_math_kstat_data(char *buf, size_t size, void *data)
{
raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data;
ssize_t off = 0;
int i;
ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);
if (cstat == fstat) {
off += kmem_scnprintf(buf + off, size - off, "%-17s",
"fastest");
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) {
int id = fstat->gen[i];
off += kmem_scnprintf(buf + off, size - off, "%-16s",
raidz_supp_impl[id]->name);
}
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) {
int id = fstat->rec[i];
off += kmem_scnprintf(buf + off, size - off, "%-16s",
raidz_supp_impl[id]->name);
}
} else {
ptrdiff_t id = cstat - raidz_impl_kstats;
off += kmem_scnprintf(buf + off, size - off, "%-17s",
raidz_supp_impl[id]->name);
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
off += kmem_scnprintf(buf + off, size - off, "%-16llu",
(u_longlong_t)cstat->gen[i]);
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
off += kmem_scnprintf(buf + off, size - off, "%-16llu",
(u_longlong_t)cstat->rec[i]);
}
(void) kmem_scnprintf(buf + off, size - off, "\n");
return (0);
}
static void *
raidz_math_kstat_addr(kstat_t *ksp, loff_t n)
{
if (n <= raidz_supp_impl_cnt)
ksp->ks_private = (void *) (raidz_impl_kstats + n);
else
ksp->ks_private = NULL;
return (ksp->ks_private);
}
#define BENCH_D_COLS (8ULL)
#define BENCH_COLS (BENCH_D_COLS + PARITY_PQR)
#define BENCH_ZIO_SIZE (1ULL << SPA_OLD_MAXBLOCKSHIFT) /* 128 kiB */
#define BENCH_NS MSEC2NSEC(1) /* 1ms */
typedef void (*benchmark_fn)(raidz_map_t *rm, const int fn);
static void
benchmark_gen_impl(raidz_map_t *rm, const int fn)
{
(void) fn;
vdev_raidz_generate_parity(rm);
}
static void
benchmark_rec_impl(raidz_map_t *rm, const int fn)
{
static const int rec_tgt[7][3] = {
{1, 2, 3}, /* rec_p: bad QR & D[0] */
{0, 2, 3}, /* rec_q: bad PR & D[0] */
{0, 1, 3}, /* rec_r: bad PQ & D[0] */
{2, 3, 4}, /* rec_pq: bad R & D[0][1] */
{1, 3, 4}, /* rec_pr: bad Q & D[0][1] */
{0, 3, 4}, /* rec_qr: bad P & D[0][1] */
{3, 4, 5} /* rec_pqr: bad & D[0][1][2] */
};
vdev_raidz_reconstruct(rm, rec_tgt[fn], 3);
}
/*
* Benchmarking of all supported implementations (raidz_supp_impl_cnt)
* is performed by setting the rm_ops pointer and calling the top level
* generate/reconstruct methods of bench_rm.
*/
static void
benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn)
{
uint64_t run_cnt, speed, best_speed = 0;
hrtime_t t_start, t_diff;
raidz_impl_ops_t *curr_impl;
raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
int impl, i;
for (impl = 0; impl < raidz_supp_impl_cnt; impl++) {
/* set an implementation to benchmark */
curr_impl = raidz_supp_impl[impl];
bench_rm->rm_ops = curr_impl;
run_cnt = 0;
t_start = gethrtime();
do {
for (i = 0; i < 5; i++, run_cnt++)
bench_fn(bench_rm, fn);
t_diff = gethrtime() - t_start;
} while (t_diff < BENCH_NS);
speed = run_cnt * BENCH_ZIO_SIZE * NANOSEC;
speed /= (t_diff * BENCH_COLS);
if (bench_fn == benchmark_gen_impl)
raidz_impl_kstats[impl].gen[fn] = speed;
else
raidz_impl_kstats[impl].rec[fn] = speed;
/* Update fastest implementation method */
if (speed > best_speed) {
best_speed = speed;
if (bench_fn == benchmark_gen_impl) {
fstat->gen[fn] = impl;
vdev_raidz_fastest_impl.gen[fn] =
curr_impl->gen[fn];
} else {
fstat->rec[fn] = impl;
vdev_raidz_fastest_impl.rec[fn] =
curr_impl->rec[fn];
}
}
}
}
#endif
/*
* Initialize and benchmark all supported implementations.
*/
static void
benchmark_raidz(void)
{
raidz_impl_ops_t *curr_impl;
int i, c;
/* Move supported impl into raidz_supp_impl */
for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];
if (curr_impl->init)
curr_impl->init();
if (curr_impl->is_supported())
raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
}
membar_producer(); /* complete raidz_supp_impl[] init */
raidz_supp_impl_cnt = c; /* number of supported impl */
#if defined(_KERNEL)
abd_t *pabd;
zio_t *bench_zio = NULL;
raidz_map_t *bench_rm = NULL;
uint64_t bench_parity;
/* Fake a zio and run the benchmark on a warmed up buffer */
bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
bench_zio->io_offset = 0;
bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);
/* Benchmark parity generation methods */
for (int fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
bench_parity = fn + 1;
/* New raidz_map is needed for each generate_p/q/r */
bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
BENCH_D_COLS + bench_parity, bench_parity);
benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);
vdev_raidz_map_free(bench_rm);
}
/* Benchmark data reconstruction methods */
bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
BENCH_COLS, PARITY_PQR);
/* Ensure that fake parity blocks are initialized */
for (c = 0; c < bench_rm->rm_row[0]->rr_firstdatacol; c++) {
pabd = bench_rm->rm_row[0]->rr_col[c].rc_abd;
memset(abd_to_buf(pabd), 0xAA, abd_get_size(pabd));
}
for (int fn = 0; fn < RAIDZ_REC_NUM; fn++)
benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);
vdev_raidz_map_free(bench_rm);
/* cleanup the bench zio */
abd_free(bench_zio->io_abd);
kmem_free(bench_zio, sizeof (zio_t));
#else
/*
* Skip the benchmark in user space to avoid impacting libzpool
* consumers (zdb, zhack, zinject, ztest). The last implementation
* is assumed to be the fastest and used by default.
*/
memcpy(&vdev_raidz_fastest_impl,
raidz_supp_impl[raidz_supp_impl_cnt - 1],
sizeof (vdev_raidz_fastest_impl));
strcpy(vdev_raidz_fastest_impl.name, "fastest");
#endif /* _KERNEL */
}
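/*
 * Benchmark the compiled-in implementations, install the benchmark kstat,
 * and apply any implementation selected before initialization completed.
 */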
void
vdev_raidz_math_init(void)
{
/* Determine the fastest available implementation. */
benchmark_raidz();
#if defined(_KERNEL)
/* Install kstats for all implementations */
raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (raidz_math_kstat != NULL) {
raidz_math_kstat->ks_data = NULL;
raidz_math_kstat->ks_ndata = UINT32_MAX;
kstat_set_raw_ops(raidz_math_kstat,
raidz_math_kstat_headers,
raidz_math_kstat_data,
raidz_math_kstat_addr);
kstat_install(raidz_math_kstat);
}
#endif
/* Finish initialization */
atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
raidz_math_initialized = B_TRUE;
}
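/*
 * Delete the benchmark kstat and call each implementation's fini method.
 */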
void
vdev_raidz_math_fini(void)
{
raidz_impl_ops_t const *curr_impl;
#if defined(_KERNEL)
if (raidz_math_kstat != NULL) {
kstat_delete(raidz_math_kstat);
raidz_math_kstat = NULL;
}
#endif
for (int i = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
curr_impl = raidz_all_maths[i];
if (curr_impl->fini)
curr_impl->fini();
}
}
static const struct {
const char *name;
uint32_t sel;
} math_impl_opts[] = {
{ "cycle", IMPL_CYCLE },
{ "fastest", IMPL_FASTEST },
{ "original", IMPL_ORIGINAL },
{ "scalar", IMPL_SCALAR }
};
/*
* Function sets desired raidz implementation.
*
* If we are called before init(), user preference will be saved in
* user_sel_impl, and applied in later init() call. This occurs when module
* parameter is specified on module load. Otherwise, directly update
* zfs_vdev_raidz_impl.
*
* @val Name of raidz implementation to use
*/
int
vdev_raidz_impl_set(const char *val)
{
int err = -EINVAL;
char req_name[RAIDZ_IMPL_NAME_MAX];
uint32_t impl = RAIDZ_IMPL_READ(user_sel_impl);
size_t i;
/* sanitize input */
i = strnlen(val, RAIDZ_IMPL_NAME_MAX);
if (i == 0 || i == RAIDZ_IMPL_NAME_MAX)
return (err);
strlcpy(req_name, val, RAIDZ_IMPL_NAME_MAX);
while (i > 0 && !!isspace(req_name[i-1]))
i--;
req_name[i] = '\0';
/* Check mandatory options */
for (i = 0; i < ARRAY_SIZE(math_impl_opts); i++) {
if (strcmp(req_name, math_impl_opts[i].name) == 0) {
impl = math_impl_opts[i].sel;
err = 0;
break;
}
}
/* check all supported impl if init() was already called */
if (err != 0 && raidz_math_initialized) {
/* check all supported implementations */
for (i = 0; i < raidz_supp_impl_cnt; i++) {
if (strcmp(req_name, raidz_supp_impl[i]->name) == 0) {
impl = i;
err = 0;
break;
}
}
}
if (err == 0) {
if (raidz_math_initialized)
atomic_swap_32(&zfs_vdev_raidz_impl, impl);
else
atomic_swap_32(&user_sel_impl, impl);
}
return (err);
}
-#if defined(_KERNEL) && defined(__linux__)
-
-static int
-zfs_vdev_raidz_impl_set(const char *val, zfs_kernel_param_t *kp)
-{
- return (vdev_raidz_impl_set(val));
-}
+#if defined(_KERNEL)
-static int
-zfs_vdev_raidz_impl_get(char *buffer, zfs_kernel_param_t *kp)
+int
+vdev_raidz_impl_get(char *buffer, size_t size)
{
int i, cnt = 0;
char *fmt;
const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);
ASSERT(raidz_math_initialized);
/* list mandatory options */
for (i = 0; i < ARRAY_SIZE(math_impl_opts) - 2; i++) {
fmt = (impl == math_impl_opts[i].sel) ? "[%s] " : "%s ";
- cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
+ cnt += kmem_scnprintf(buffer + cnt, size - cnt, fmt,
math_impl_opts[i].name);
}
/* list all supported implementations */
for (i = 0; i < raidz_supp_impl_cnt; i++) {
fmt = (i == impl) ? "[%s] " : "%s ";
- cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
+ cnt += kmem_scnprintf(buffer + cnt, size - cnt, fmt,
raidz_supp_impl[i]->name);
}
return (cnt);
}
-module_param_call(zfs_vdev_raidz_impl, zfs_vdev_raidz_impl_set,
- zfs_vdev_raidz_impl_get, NULL, 0644);
-MODULE_PARM_DESC(zfs_vdev_raidz_impl, "Select raidz implementation.");
#endif
diff --git a/sys/contrib/openzfs/module/zfs/vdev_rebuild.c b/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
index f80ed1b401f9..7ca1b1f846b6 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
@@ -1,1180 +1,1181 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*
* Copyright (c) 2018, Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
* Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
* Copyright (c) 2024 by Delphix. All rights reserved.
*/
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/dsl_scan.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>
/*
* This file contains the sequential reconstruction implementation for
* resilvering. This form of resilvering is internally referred to as device
* rebuild to avoid conflating it with the traditional healing reconstruction
* performed by the dsl scan code.
*
* When replacing a device, or scrubbing the pool, ZFS has historically used
* a process called resilvering which is a form of healing reconstruction.
* This approach has the advantage that as blocks are read from disk their
* checksums can be immediately verified and the data repaired. Unfortunately,
* it also results in a random IO pattern to the disk even when extra care
* is taken to sequentialize the IO as much as possible. This substantially
* increases the time required to resilver the pool and restore redundancy.
*
* For mirrored devices it's possible to implement an alternate sequential
* reconstruction strategy when resilvering. Sequential reconstruction
* behaves like a traditional RAID rebuild and reconstructs a device in LBA
* order without verifying the checksum. After this phase completes a second
* scrub phase is started to verify all of the checksums. This two phase
* process will take longer than the healing reconstruction described above.
* However, it has the advantage that after the first reconstruction phase
* completes, redundancy has been restored. At this point the pool can incur
* another device failure without risking data loss.
*
* There are a few noteworthy limitations and other advantages of resilvering
* using sequential reconstruction vs healing reconstruction.
*
* Limitations:
*
* - Sequential reconstruction is not possible on RAIDZ due to its
* variable stripe width. Note dRAID uses a fixed stripe width which
* avoids this issue, but comes at the expense of some usable capacity.
*
* - Block checksums are not verified during sequential reconstruction.
* Similar to traditional RAID the parity/mirror data is reconstructed
* but cannot be immediately double checked. For this reason when the
* last active resilver completes the pool is automatically scrubbed
* by default.
*
* - Deferred resilvers using sequential reconstruction are not currently
* supported. When adding another vdev to an active top-level resilver
* it must be restarted.
*
* Advantages:
*
* - Sequential reconstruction is performed in LBA order which may be faster
* than healing reconstruction particularly when using HDDs (or
* especially with SMR devices). Only allocated capacity is resilvered.
*
* - Sequential reconstruction is not constrained by ZFS block boundaries.
* This allows it to issue larger IOs to disk which span multiple blocks
* allowing all of these logical blocks to be repaired with a single IO.
*
* - Unlike a healing resilver or scrub which are pool wide operations,
* sequential reconstruction is handled by the top-level vdevs. This
* allows for it to be started or canceled on a top-level vdev without
* impacting any other top-level vdevs in the pool.
*
* - Data only referenced by a pool checkpoint will be repaired because
* that space is reflected in the space maps. This differs for a
* healing resilver or scrub which will not repair that data.
*/
/*
* Size of rebuild reads; defaults to 1MiB per data disk and is capped at
* SPA_MAXBLOCKSIZE.
*/
static uint64_t zfs_rebuild_max_segment = 1024 * 1024;
/*
* Maximum number of bytes allowed to be in flight per leaf vdev during a
* sequential resilver. We attempt to strike a balance here between keeping
* the vdev queues full of I/Os at all times and not overflowing the queues,
* which would cause long latency and, in turn, long txg sync times.
*
* A large default value can be safely used here because the default target
* segment size is also large (zfs_rebuild_max_segment=1M). This helps keep
* the queue depth short.
*
* 64MB was observed to deliver the best performance and is set as the default.
* Testing was performed with a 106-drive dRAID HDD pool (draid2:11d:106c)
* and a rebuild rate of 1.2GB/s was measured to the distributed spare.
* Smaller values were unable to fully saturate the available pool I/O.
*/
static uint64_t zfs_rebuild_vdev_limit = 64 << 20;
/*
* Automatically start a pool scrub when the last active sequential resilver
* completes in order to verify the checksums of all blocks which have been
* resilvered. This option is enabled by default and is strongly recommended.
*/
static int zfs_rebuild_scrub_enabled = 1;
/*
* For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
*/
static __attribute__((noreturn)) void vdev_rebuild_thread(void *arg);
static void vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx);
/*
* Clear the per-vdev rebuild bytes value for a vdev tree.
*/
static void
clear_rebuild_bytes(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (uint64_t i = 0; i < vd->vdev_children; i++)
clear_rebuild_bytes(vd->vdev_child[i]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_rebuild_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
/*
* Determines whether a vdev_rebuild_thread() should be stopped.
*/
static boolean_t
vdev_rebuild_should_stop(vdev_t *vd)
{
return (!vdev_writeable(vd) || vd->vdev_removing ||
vd->vdev_rebuild_exit_wanted ||
vd->vdev_rebuild_cancel_wanted ||
vd->vdev_rebuild_reset_wanted);
}
/*
* Determine if the rebuild should be canceled. This may happen when all
* vdevs with MISSING DTLs are detached.
*/
static boolean_t
vdev_rebuild_should_cancel(vdev_t *vd)
{
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
return (B_TRUE);
return (B_FALSE);
}
/*
* The sync task for updating the on-disk state of a rebuild. This is
* scheduled by vdev_rebuild_range().
*/
static void
vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t txg = dmu_tx_get_txg(tx);
mutex_enter(&vd->vdev_rebuild_lock);
if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
vr->vr_scan_offset[txg & TXG_MASK] = 0;
}
vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);
VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp, tx));
mutex_exit(&vd->vdev_rebuild_lock);
}
/*
* Initialize the on-disk state for a new rebuild, start the rebuild thread.
*/
static void
vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
ASSERT(vd->vdev_rebuilding);
spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
mutex_enter(&vd->vdev_rebuild_lock);
memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
vrp->vrp_min_txg = 0;
vrp->vrp_max_txg = dmu_tx_get_txg(tx);
vrp->vrp_start_time = gethrestime_sec();
vrp->vrp_scan_time_ms = 0;
vr->vr_prev_scan_time_ms = 0;
/*
* Rebuilds are currently only used when replacing a device, in which
* case there must be DTL_MISSING entries. In the future, we could
* allow rebuilds to be used in a way similar to a scrub. This would
* be useful because it would allow us to rebuild the space used by
* pool checkpoints.
*/
VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));
VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp, tx));
spa_history_log_internal(spa, "rebuild", tx,
"vdev_id=%llu vdev_guid=%llu started",
(u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
vd->vdev_rebuild_thread = thread_create(NULL, 0,
vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&vd->vdev_rebuild_lock);
}
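/*
 * Post a resilver sysevent tagged as a sequential rebuild.
 */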
static void
vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, const char *name)
{
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
spa_event_notify(spa, vd, aux, name);
nvlist_free(aux);
}
/*
* Called to request that a new rebuild be started. The feature will remain
* active for the duration of the rebuild, then revert to the enabled state.
*/
static void
vdev_rebuild_initiate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_top == vd);
ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
ASSERT(!vd->vdev_rebuilding);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
vd->vdev_rebuilding = B_TRUE;
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
(void *)(uintptr_t)vd->vdev_id, tx);
dmu_tx_commit(tx);
vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
}
/*
* Update the on-disk state to completed when a rebuild finishes.
*/
static void
vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
mutex_enter(&vd->vdev_rebuild_lock);
/*
* Handle a second device failure if it occurs after all rebuild I/O
* has completed but before this sync task has been executed.
*/
if (vd->vdev_rebuild_reset_wanted) {
mutex_exit(&vd->vdev_rebuild_lock);
vdev_rebuild_reset_sync(arg, tx);
return;
}
vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
vrp->vrp_end_time = gethrestime_sec();
VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp, tx));
vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
spa_history_log_internal(spa, "rebuild", tx,
"vdev_id=%llu vdev_guid=%llu complete",
(u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);
/* Handles detaching of spares */
spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
vd->vdev_rebuilding = B_FALSE;
mutex_exit(&vd->vdev_rebuild_lock);
/*
* While we're in syncing context take the opportunity to
* setup the scrub when there are no more active rebuilds.
*/
setup_sync_arg_t setup_sync_arg = {
.func = POOL_SCAN_SCRUB,
.txgstart = 0,
.txgend = 0,
};
if (dsl_scan_setup_check(&setup_sync_arg.func, tx) == 0 &&
zfs_rebuild_scrub_enabled) {
dsl_scan_setup_sync(&setup_sync_arg, tx);
}
cv_broadcast(&vd->vdev_rebuild_cv);
/* Clear recent error events (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, NULL);
}
/*
* Update the on-disk state to canceled when a rebuild finishes.
*/
static void
vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
mutex_enter(&vd->vdev_rebuild_lock);
vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
vrp->vrp_end_time = gethrestime_sec();
VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp, tx));
spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
spa_history_log_internal(spa, "rebuild", tx,
"vdev_id=%llu vdev_guid=%llu canceled",
(u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);
vd->vdev_rebuild_cancel_wanted = B_FALSE;
vd->vdev_rebuilding = B_FALSE;
mutex_exit(&vd->vdev_rebuild_lock);
spa_notify_waiters(spa);
cv_broadcast(&vd->vdev_rebuild_cv);
}
/*
* Resets the progress of a running rebuild. This will occur when a new
* vdev is added to rebuild.
*/
static void
vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
mutex_enter(&vd->vdev_rebuild_lock);
ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
vrp->vrp_last_offset = 0;
vrp->vrp_min_txg = 0;
vrp->vrp_max_txg = dmu_tx_get_txg(tx);
vrp->vrp_bytes_scanned = 0;
vrp->vrp_bytes_issued = 0;
vrp->vrp_bytes_rebuilt = 0;
vrp->vrp_bytes_est = 0;
vrp->vrp_scan_time_ms = 0;
vr->vr_prev_scan_time_ms = 0;
/* See vdev_rebuild_initiate_sync comment */
VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));
VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp, tx));
spa_history_log_internal(spa, "rebuild", tx,
"vdev_id=%llu vdev_guid=%llu reset",
(u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
vd->vdev_rebuild_reset_wanted = B_FALSE;
ASSERT(vd->vdev_rebuilding);
vd->vdev_rebuild_thread = thread_create(NULL, 0,
vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&vd->vdev_rebuild_lock);
}
/*
* Clear the last rebuild status.
*/
void
vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
objset_t *mos = spa_meta_objset(spa);
mutex_enter(&vd->vdev_rebuild_lock);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
mutex_exit(&vd->vdev_rebuild_lock);
return;
}
clear_rebuild_bytes(vd);
memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
VERIFY0(zap_update(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp, tx));
}
mutex_exit(&vd->vdev_rebuild_lock);
}
/*
* The zio_done_func_t callback for each rebuild I/O issued. It's responsible
* for updating the rebuild stats and limiting the number of in flight I/Os.
*/
static void
vdev_rebuild_cb(zio_t *zio)
{
vdev_rebuild_t *vr = zio->io_private;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
vdev_t *vd = vr->vr_top_vdev;
mutex_enter(&vr->vr_io_lock);
if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
/*
* The I/O failed because the top-level vdev was unavailable.
* Attempt to roll back to the last completed offset, in order
* to resume from the correct location if the pool is resumed.
* (This works because spa_sync waits on spa_txg_zio before
* it runs sync tasks.)
*/
uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
*off = MIN(*off, zio->io_offset);
} else if (zio->io_error) {
vrp->vrp_errors++;
}
abd_free(zio->io_abd);
ASSERT3U(vr->vr_bytes_inflight, >, 0);
vr->vr_bytes_inflight -= zio->io_size;
cv_broadcast(&vr->vr_io_cv);
mutex_exit(&vr->vr_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* Initialize a block pointer that can be used to read the given segment
* for sequential rebuild.
*/
static void
vdev_rebuild_blkptr_init(blkptr_t *bp, vdev_t *vd, uint64_t start,
uint64_t asize)
{
ASSERT(vd->vdev_ops == &vdev_draid_ops ||
vd->vdev_ops == &vdev_mirror_ops ||
vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
uint64_t psize = vd->vdev_ops == &vdev_draid_ops ?
vdev_draid_asize_to_psize(vd, asize) : asize;
BP_ZERO(bp);
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], start);
DVA_SET_GANG(&bp->blk_dva[0], 0);
DVA_SET_ASIZE(&bp->blk_dva[0], asize);
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, psize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}
/*
* Issues a rebuild I/O and takes care of rate limiting the number of queued
* rebuild I/Os. The provided start and size must be properly aligned for the
* top-level vdev type being rebuilt.
*/
static int
vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
{
uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
vdev_t *vd = vr->vr_top_vdev;
spa_t *spa = vd->vdev_spa;
blkptr_t blk;
ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);
vr->vr_pass_bytes_scanned += size;
vr->vr_rebuild_phys.vrp_bytes_scanned += size;
/*
* Rebuild the data in this range by constructing a special block
* pointer. It has no relation to any existing blocks in the pool.
* However, by disabling checksum verification and issuing a scrub IO
* we can reconstruct and repair any children with missing data.
*/
vdev_rebuild_blkptr_init(&blk, vd, start, size);
uint64_t psize = BP_GET_PSIZE(&blk);
if (!vdev_dtl_need_resilver(vd, &blk.blk_dva[0], psize, TXG_UNKNOWN)) {
vr->vr_pass_bytes_skipped += size;
return (0);
}
mutex_enter(&vr->vr_io_lock);
/* Limit in flight rebuild I/Os */
while (vr->vr_bytes_inflight >= vr->vr_bytes_inflight_max)
cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);
vr->vr_bytes_inflight += psize;
mutex_exit(&vr->vr_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
mutex_enter(&vd->vdev_rebuild_lock);
/* This is the first I/O for this txg. */
if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
vr->vr_scan_offset[txg & TXG_MASK] = start;
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_rebuild_update_sync,
(void *)(uintptr_t)vd->vdev_id, tx);
}
/* When exiting, write out our progress. */
if (vdev_rebuild_should_stop(vd)) {
mutex_enter(&vr->vr_io_lock);
vr->vr_bytes_inflight -= psize;
mutex_exit(&vr->vr_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
mutex_exit(&vd->vdev_rebuild_lock);
dmu_tx_commit(tx);
return (SET_ERROR(EINTR));
}
mutex_exit(&vd->vdev_rebuild_lock);
dmu_tx_commit(tx);
vr->vr_scan_offset[txg & TXG_MASK] = start + size;
vr->vr_pass_bytes_issued += size;
vr->vr_rebuild_phys.vrp_bytes_issued += size;
zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, &blk,
abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
ZIO_FLAG_RESILVER, NULL));
return (0);
}
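/*
 * Illustrative userland sketch (not part of this change) of the in-flight
 * byte throttle used by vdev_rebuild_range() above: issuers block on a
 * condition variable while too many bytes are outstanding, and the I/O
 * completion path subtracts its size and wakes them. The names below
 * (throttle_t, issue_io, io_done) are illustrative, not OpenZFS APIs.
 */
#include <pthread.h>
#include <stdint.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t  cv;
	uint64_t        inflight;
	uint64_t        inflight_max;
} throttle_t;

/* Called before issuing an I/O of 'size' bytes; blocks while over the limit. */
static void
issue_io(throttle_t *t, uint64_t size)
{
	pthread_mutex_lock(&t->lock);
	while (t->inflight >= t->inflight_max)
		pthread_cond_wait(&t->cv, &t->lock);
	t->inflight += size;
	pthread_mutex_unlock(&t->lock);
	/* ... hand the I/O to an asynchronous queue here ... */
}

/* Completion callback: release the bytes and wake any waiting issuer. */
static void
io_done(throttle_t *t, uint64_t size)
{
	pthread_mutex_lock(&t->lock);
	t->inflight -= size;
	pthread_cond_broadcast(&t->cv);
	pthread_mutex_unlock(&t->lock);
}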
/*
* Issues rebuild I/Os for all ranges in the provided vr->vr_tree range tree.
*/
static int
vdev_rebuild_ranges(vdev_rebuild_t *vr)
{
vdev_t *vd = vr->vr_top_vdev;
zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
zfs_btree_index_t idx;
int error;
- for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
+ for (zfs_range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
rs = zfs_btree_next(t, &idx, &idx)) {
- uint64_t start = rs_get_start(rs, vr->vr_scan_tree);
- uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start;
+ uint64_t start = zfs_rs_get_start(rs, vr->vr_scan_tree);
+ uint64_t size = zfs_rs_get_end(rs, vr->vr_scan_tree) - start;
/*
* zfs_scan_suspend_progress can be set to disable rebuild
* progress for testing. See comment in dsl_scan_sync().
*/
while (zfs_scan_suspend_progress &&
!vdev_rebuild_should_stop(vd)) {
delay(hz);
}
while (size > 0) {
uint64_t chunk_size;
/*
* Split range into legally-sized logical chunks
* given the constraints of the top-level vdev
* being rebuilt (dRAID or mirror).
*/
ASSERT3P(vd->vdev_ops, !=, NULL);
chunk_size = vd->vdev_ops->vdev_op_rebuild_asize(vd,
start, size, zfs_rebuild_max_segment);
error = vdev_rebuild_range(vr, start, chunk_size);
if (error != 0)
return (error);
size -= chunk_size;
start += chunk_size;
}
}
return (0);
}
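/*
 * Illustrative sketch (not part of this change) of the chunking loop in
 * vdev_rebuild_ranges() above: a [start, start + size) range is issued as
 * consecutive chunks no larger than a maximum segment. In the real code the
 * chunk size also honors the top-level vdev's alignment constraints via
 * vdev_op_rebuild_asize(); that detail is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

static void
issue_in_chunks(uint64_t start, uint64_t size, uint64_t max_segment)
{
	while (size > 0) {
		uint64_t chunk = size < max_segment ? size : max_segment;
		printf("issue [%llu, %llu)\n",
		    (unsigned long long)start,
		    (unsigned long long)(start + chunk));
		start += chunk;
		size -= chunk;
	}
}

int
main(void)
{
	/* A 5 MiB range issued in 1 MiB chunks produces five I/Os. */
	issue_in_chunks(0, 5 << 20, 1 << 20);
	return (0);
}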
/*
* Calculates the estimated capacity which remains to be scanned. Since
* we traverse the pool in metaslab order, only allocated capacity beyond
* the vrp_last_offset need be considered. All lower offsets must have
* already been rebuilt and are thus already included in vrp_bytes_scanned.
*/
static void
vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
{
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t bytes_est = vrp->vrp_bytes_scanned;
if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
return;
for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_ms[i];
mutex_enter(&msp->ms_lock);
bytes_est += metaslab_allocated_space(msp);
mutex_exit(&msp->ms_lock);
}
vrp->vrp_bytes_est = bytes_est;
}
/*
* Load from disk the top-level vdev's rebuild information.
*/
int
vdev_rebuild_load(vdev_t *vd)
{
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
spa_t *spa = vd->vdev_spa;
int err = 0;
mutex_enter(&vd->vdev_rebuild_lock);
vd->vdev_rebuilding = B_FALSE;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
mutex_exit(&vd->vdev_rebuild_lock);
return (SET_ERROR(ENOTSUP));
}
ASSERT(vd->vdev_top == vd);
err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
REBUILD_PHYS_ENTRIES, vrp);
/*
* A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
* not prevent a pool from being imported. Clear the rebuild
* status allowing a new resilver/rebuild to be started.
*/
if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
} else if (err) {
mutex_exit(&vd->vdev_rebuild_lock);
return (err);
}
vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
vr->vr_top_vdev = vd;
mutex_exit(&vd->vdev_rebuild_lock);
return (0);
}
/*
* Each scan thread is responsible for rebuilding a top-level vdev. The
* rebuild progress is tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
*/
static __attribute__((noreturn)) void
vdev_rebuild_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int error = 0;
/*
* If there's a scrub in progress, request that it be stopped. This
* is not required for a correct rebuild, but we do want rebuilds to
* emulate the resilver behavior as much as possible.
*/
dsl_pool_t *dsl = spa_get_dsl(spa);
if (dsl_scan_scrubbing(dsl))
dsl_scan_cancel(dsl);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&vd->vdev_rebuild_lock);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
ASSERT(vd->vdev_rebuilding);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
vr->vr_top_vdev = vd;
vr->vr_scan_msp = NULL;
- vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ vr->vr_scan_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL,
+ 0, 0);
mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL);
vr->vr_pass_start_time = gethrtime();
vr->vr_pass_bytes_scanned = 0;
vr->vr_pass_bytes_issued = 0;
vr->vr_pass_bytes_skipped = 0;
uint64_t update_est_time = gethrtime();
vdev_rebuild_update_bytes_est(vd, 0);
clear_rebuild_bytes(vr->vr_top_vdev);
mutex_exit(&vd->vdev_rebuild_lock);
/*
* Systematically walk the metaslabs and issue rebuild I/Os for
* all ranges in the allocated space map.
*/
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_ms[i];
vr->vr_scan_msp = msp;
/*
* Calculate the max number of in-flight bytes for top-level
* vdev scanning operations (minimum 1MB, maximum 1/2 of
* arc_c_max shared by all top-level vdevs). Limits for the
* issuing phase are done per top-level vdev and are handled
* separately.
*/
uint64_t limit = (arc_c_max / 2) / MAX(rvd->vdev_children, 1);
vr->vr_bytes_inflight_max = MIN(limit, MAX(1ULL << 20,
zfs_rebuild_vdev_limit * vd->vdev_children));
/*
* Removal of vdevs from the vdev tree may eliminate the need
* for the rebuild, in which case it should be canceled. The
* vdev_rebuild_cancel_wanted flag is set until the sync task
* completes. This may be after the rebuild thread exits.
*/
if (vdev_rebuild_should_cancel(vd)) {
vd->vdev_rebuild_cancel_wanted = B_TRUE;
error = EINTR;
break;
}
- ASSERT0(range_tree_space(vr->vr_scan_tree));
+ ASSERT0(zfs_range_tree_space(vr->vr_scan_tree));
/* Disable any new allocations to this metaslab */
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* If there are outstanding allocations wait for them to be
* synced. This is needed to ensure all allocated ranges are
* on disk and therefore will be rebuilt.
*/
for (int j = 0; j < TXG_SIZE; j++) {
- if (range_tree_space(msp->ms_allocating[j])) {
+ if (zfs_range_tree_space(msp->ms_allocating[j])) {
mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock);
txg_wait_synced(dsl, 0);
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
break;
}
}
/*
* When a metaslab has been allocated from, read its allocated
* ranges from the space map object into the vr_scan_tree.
* Then add inflight / unflushed ranges and remove inflight /
* unflushed frees. This is the minimum range to be rebuilt.
*/
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
vr->vr_scan_tree, SM_ALLOC));
for (int i = 0; i < TXG_SIZE; i++) {
- ASSERT0(range_tree_space(
+ ASSERT0(zfs_range_tree_space(
msp->ms_allocating[i]));
}
- range_tree_walk(msp->ms_unflushed_allocs,
- range_tree_add, vr->vr_scan_tree);
- range_tree_walk(msp->ms_unflushed_frees,
- range_tree_remove, vr->vr_scan_tree);
+ zfs_range_tree_walk(msp->ms_unflushed_allocs,
+ zfs_range_tree_add, vr->vr_scan_tree);
+ zfs_range_tree_walk(msp->ms_unflushed_frees,
+ zfs_range_tree_remove, vr->vr_scan_tree);
/*
* Remove ranges which have already been rebuilt based
* on the last offset. This can happen when restarting
* a scan after exporting and re-importing the pool.
*/
- range_tree_clear(vr->vr_scan_tree, 0,
+ zfs_range_tree_clear(vr->vr_scan_tree, 0,
vrp->vrp_last_offset);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock);
/*
* To provide an accurate estimate re-calculate the estimated
* size every 5 minutes to account for recent allocations and
* frees made to space maps which have not yet been rebuilt.
*/
if (gethrtime() > update_est_time + SEC2NSEC(300)) {
update_est_time = gethrtime();
vdev_rebuild_update_bytes_est(vd, i);
}
/*
* Walk the allocated space map and issue the rebuild I/O.
*/
error = vdev_rebuild_ranges(vr);
- range_tree_vacate(vr->vr_scan_tree, NULL, NULL);
+ zfs_range_tree_vacate(vr->vr_scan_tree, NULL, NULL);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
metaslab_enable(msp, B_FALSE, B_FALSE);
if (error != 0)
break;
}
- range_tree_destroy(vr->vr_scan_tree);
+ zfs_range_tree_destroy(vr->vr_scan_tree);
spa_config_exit(spa, SCL_CONFIG, FTAG);
/* Wait for any remaining rebuild I/O to complete */
mutex_enter(&vr->vr_io_lock);
while (vr->vr_bytes_inflight > 0)
cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);
mutex_exit(&vr->vr_io_lock);
mutex_destroy(&vr->vr_io_lock);
cv_destroy(&vr->vr_io_cv);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
dsl_pool_t *dp = spa_get_dsl(spa);
dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
mutex_enter(&vd->vdev_rebuild_lock);
if (error == 0) {
/*
* After a successful rebuild clear the DTLs of all ranges
* which were missing when the rebuild was started. These
* ranges must have been rebuilt as a consequence of rebuilding
* all allocated space. Note that unlike a scrub or resilver
* the rebuild operation will reconstruct data only referenced
* by a pool checkpoint. See the dsl_scan_done() comments.
*/
dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
(void *)(uintptr_t)vd->vdev_id, tx);
} else if (vd->vdev_rebuild_cancel_wanted) {
/*
* The rebuild operation was canceled. This will occur when
* a device participating in the rebuild is detached.
*/
dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
(void *)(uintptr_t)vd->vdev_id, tx);
} else if (vd->vdev_rebuild_reset_wanted) {
/*
* Reset the running rebuild without canceling and restarting
* it. This will occur when a new device is attached and must
* participate in the rebuild.
*/
dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
(void *)(uintptr_t)vd->vdev_id, tx);
} else {
/*
* The rebuild operation should be suspended. This may occur
* when detaching a child vdev or when exporting the pool. The
* rebuild is left in the active state so it will be resumed.
*/
ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
vd->vdev_rebuilding = B_FALSE;
}
dmu_tx_commit(tx);
vd->vdev_rebuild_thread = NULL;
mutex_exit(&vd->vdev_rebuild_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
cv_broadcast(&vd->vdev_rebuild_cv);
thread_exit();
}
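/*
 * Standalone example (not part of this change) of the scan-phase limit
 * computed per metaslab in vdev_rebuild_thread() above: the per-vdev cap is
 * the smaller of (arc_c_max / 2 shared across top-level vdevs) and
 * (zfs_rebuild_vdev_limit bytes per child, floored at 1 MiB). The values
 * below are hypothetical and only illustrate the arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t arc_c_max = 16ULL << 30;		/* e.g. 16 GiB ARC cap */
	uint64_t top_level_vdevs = 4;			/* children of the root vdev */
	uint64_t rebuild_vdev_limit = 64ULL << 20;	/* per-child tunable, e.g. 64 MiB */
	uint64_t children = 2;				/* e.g. a 2-way mirror */

	uint64_t limit = (arc_c_max / 2) / MAX(top_level_vdevs, 1);
	uint64_t inflight_max = MIN(limit,
	    MAX(1ULL << 20, rebuild_vdev_limit * children));

	/* Prints 134217728 (128 MiB): the per-child cap wins over the ARC share. */
	printf("%llu\n", (unsigned long long)inflight_max);
	return (0);
}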
/*
* Returns B_TRUE if any top-level vdevs are rebuilding.
*/
boolean_t
vdev_rebuild_active(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
boolean_t ret = B_FALSE;
if (vd == spa->spa_root_vdev) {
for (uint64_t i = 0; i < vd->vdev_children; i++) {
ret = vdev_rebuild_active(vd->vdev_child[i]);
if (ret)
return (ret);
}
} else if (vd->vdev_top_zap != 0) {
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
mutex_enter(&vd->vdev_rebuild_lock);
ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
mutex_exit(&vd->vdev_rebuild_lock);
}
return (ret);
}
/*
* Start a rebuild operation. The rebuild is restarted if the
* top-level vdev is already actively rebuilding.
*/
void
vdev_rebuild(vdev_t *vd)
{
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;
ASSERT(vd->vdev_top == vd);
ASSERT(vdev_is_concrete(vd));
ASSERT(!vd->vdev_removing);
ASSERT(spa_feature_is_enabled(vd->vdev_spa,
SPA_FEATURE_DEVICE_REBUILD));
mutex_enter(&vd->vdev_rebuild_lock);
if (vd->vdev_rebuilding) {
ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);
/*
* Signal a running rebuild operation that it should restart
* from the beginning because a new device was attached. The
* vdev_rebuild_reset_wanted flag is set until the sync task
* completes. This may be after the rebuild thread exits.
*/
if (!vd->vdev_rebuild_reset_wanted)
vd->vdev_rebuild_reset_wanted = B_TRUE;
} else {
vdev_rebuild_initiate(vd);
}
mutex_exit(&vd->vdev_rebuild_lock);
}
static void
vdev_rebuild_restart_impl(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
if (vd == spa->spa_root_vdev) {
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_rebuild_restart_impl(vd->vdev_child[i]);
} else if (vd->vdev_top_zap != 0) {
vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
mutex_enter(&vd->vdev_rebuild_lock);
if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
vdev_writeable(vd) && !vd->vdev_rebuilding) {
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_DEVICE_REBUILD));
vd->vdev_rebuilding = B_TRUE;
vd->vdev_rebuild_thread = thread_create(NULL, 0,
vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
maxclsyspri);
}
mutex_exit(&vd->vdev_rebuild_lock);
}
}
/*
* Conditionally restart all of the vdev_rebuild_thread's for a pool. The
* feature flag must be active and the rebuild in the active state. This
* cannot be used to start a new rebuild.
*/
void
vdev_rebuild_restart(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_load_thread == curthread);
vdev_rebuild_restart_impl(spa->spa_root_vdev);
}
/*
* Stop and wait for all of the vdev_rebuild_thread's associated with the
* vdev tree provided to be terminated (canceled or stopped).
*/
void
vdev_rebuild_stop_wait(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
if (vd == spa->spa_root_vdev) {
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_rebuild_stop_wait(vd->vdev_child[i]);
} else if (vd->vdev_top_zap != 0) {
ASSERT(vd == vd->vdev_top);
mutex_enter(&vd->vdev_rebuild_lock);
if (vd->vdev_rebuild_thread != NULL) {
vd->vdev_rebuild_exit_wanted = B_TRUE;
while (vd->vdev_rebuilding) {
cv_wait(&vd->vdev_rebuild_cv,
&vd->vdev_rebuild_lock);
}
vd->vdev_rebuild_exit_wanted = B_FALSE;
}
mutex_exit(&vd->vdev_rebuild_lock);
}
}
/*
* Stop all rebuild operations but leave them in the active state so they
* will be resumed when importing the pool.
*/
void
vdev_rebuild_stop_all(spa_t *spa)
{
vdev_rebuild_stop_wait(spa->spa_root_vdev);
}
/*
* Rebuild statistics reported per top-level vdev.
*/
int
vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
{
spa_t *spa = tvd->vdev_spa;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (SET_ERROR(ENOTSUP));
if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
return (SET_ERROR(EINVAL));
int error = zap_contains(spa_meta_objset(spa),
tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);
if (error == ENOENT) {
memset(vrs, 0, sizeof (vdev_rebuild_stat_t));
vrs->vrs_state = VDEV_REBUILD_NONE;
error = 0;
} else if (error == 0) {
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
mutex_enter(&tvd->vdev_rebuild_lock);
vrs->vrs_state = vrp->vrp_rebuild_state;
vrs->vrs_start_time = vrp->vrp_start_time;
vrs->vrs_end_time = vrp->vrp_end_time;
vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
vrs->vrs_bytes_est = vrp->vrp_bytes_est;
vrs->vrs_errors = vrp->vrp_errors;
vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
vr->vr_pass_start_time);
vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
vrs->vrs_pass_bytes_skipped = vr->vr_pass_bytes_skipped;
mutex_exit(&tvd->vdev_rebuild_lock);
}
return (error);
}
ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, U64, ZMOD_RW,
"Max segment size in bytes of rebuild reads");
ZFS_MODULE_PARAM(zfs, zfs_, rebuild_vdev_limit, U64, ZMOD_RW,
"Max bytes in flight per leaf vdev for sequential resilvers");
ZFS_MODULE_PARAM(zfs, zfs_, rebuild_scrub_enabled, INT, ZMOD_RW,
"Automatically scrub after sequential resilver completes");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_removal.c b/sys/contrib/openzfs/module/zfs/vdev_removal.c
index 08c85a874803..1970c5425854 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_removal.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_removal.c
@@ -1,2566 +1,2571 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/trace_zfs.h>
/*
* This file contains the necessary logic to remove vdevs from a
* storage pool. Currently, the only devices that can be removed
* are log, cache, and spare devices; and top level vdevs from a pool
* w/o raidz or mirrors. (Note that members of a mirror can be removed
* by the detach operation.)
*
* Log vdevs are removed by evacuating them and then turning the vdev
* into a hole vdev while holding spa config locks.
*
* Top level vdevs are removed and converted into an indirect vdev via
* a multi-step process:
*
* - Disable allocations from this device (spa_vdev_remove_top).
*
* - From a new thread (spa_vdev_remove_thread), copy data from
* the removing vdev to a different vdev. The copy happens in open
* context (spa_vdev_copy_impl) and issues a sync task
* (vdev_mapping_sync) so the sync thread can update the partial
* indirect mappings in core and on disk.
*
* - If a free happens during a removal, it is freed from the
* removing vdev, and if it has already been copied, from the new
* location as well (free_from_removing_vdev).
*
* - After the removal is completed, the copy thread converts the vdev
* into an indirect vdev (vdev_remove_complete) before instructing
* the sync thread to destroy the space maps and finish the removal
* (spa_finish_removal).
*/
typedef struct vdev_copy_arg {
metaslab_t *vca_msp;
uint64_t vca_outstanding_bytes;
uint64_t vca_read_error_bytes;
uint64_t vca_write_error_bytes;
kcondvar_t vca_cv;
kmutex_t vca_lock;
} vdev_copy_arg_t;
/*
* The maximum amount of memory we can use for outstanding i/o while
* doing a device removal. This determines how much i/o we can have
* in flight concurrently.
*/
static const uint_t zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
/*
* The largest contiguous segment that we will attempt to allocate when
* removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
* there is a performance problem with attempting to allocate large blocks,
* consider decreasing this.
*
* See also the accessor function spa_remove_max_segment().
*/
uint_t zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
/*
* Ignore hard IO errors during device removal. When set, if a device
* encounters a hard IO error during the removal process, the removal will
* not be cancelled. This can result in a normally recoverable block
* becoming permanently damaged and is not recommended.
*/
static int zfs_removal_ignore_errors = 0;
/*
* Allow a remap segment to span free chunks of at most this size. The main
* impact of a larger span is that we will read and write larger, more
* contiguous chunks, with more "unnecessary" data -- trading off bandwidth
* for iops. The value here was chosen to align with
* zfs_vdev_read_gap_limit, which is a similar concept when doing regular
* reads (but there's no reason it has to be the same).
*
* Additionally, a higher span will have the following relatively minor
* effects:
* - the mapping will be smaller, since one entry can cover more allocated
* segments
* - more of the fragmentation in the removing device will be preserved
* - we'll do larger allocations, which may fail and fall back on smaller
* allocations
*/
uint_t vdev_removal_max_span = 32 * 1024;
/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a removal.
*/
int zfs_removal_suspend_progress = 0;
#define VDEV_REMOVAL_ZAP_OBJS "lzap"
static __attribute__((noreturn)) void spa_vdev_remove_thread(void *arg);
static int spa_vdev_remove_cancel_impl(spa_t *spa);
static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_REMOVING, sizeof (uint64_t),
sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
&spa->spa_removing_phys, tx));
}
static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
for (int i = 0; i < count; i++) {
uint64_t guid =
fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
if (guid == target_guid)
return (nvpp[i]);
}
return (NULL);
}
static void
vdev_activate(vdev_t *vd)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
uint64_t vdev_space = spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
ASSERT(!vd->vdev_islog);
ASSERT(vd->vdev_noalloc);
metaslab_group_activate(mg);
metaslab_group_activate(vd->vdev_log_mg);
ASSERT3U(spa->spa_nonallocating_dspace, >=, vdev_space);
spa->spa_nonallocating_dspace -= vdev_space;
vd->vdev_noalloc = B_FALSE;
}
static int
vdev_passivate(vdev_t *vd, uint64_t *txg)
{
spa_t *spa = vd->vdev_spa;
int error;
ASSERT(!vd->vdev_noalloc);
vdev_t *rvd = spa->spa_root_vdev;
metaslab_group_t *mg = vd->vdev_mg;
metaslab_class_t *normal = spa_normal_class(spa);
if (mg->mg_class == normal) {
/*
* We must check that this is not the only allocating device in
* the pool before passivating, otherwise we will not be able
* to make progress because we can't allocate from any vdevs.
*/
boolean_t last = B_TRUE;
for (uint64_t id = 0; id < rvd->vdev_children; id++) {
vdev_t *cvd = rvd->vdev_child[id];
if (cvd == vd ||
cvd->vdev_ops == &vdev_indirect_ops)
continue;
metaslab_class_t *mc = cvd->vdev_mg->mg_class;
if (mc != normal)
continue;
if (!cvd->vdev_noalloc) {
last = B_FALSE;
break;
}
}
if (last)
return (SET_ERROR(EINVAL));
}
metaslab_group_passivate(mg);
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
/*
* Wait for the youngest allocations and frees to sync,
* and then wait for the deferral of those frees to finish.
*/
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
/*
* We must ensure that no "stubby" log blocks are allocated
* on the device to be removed. These blocks could be
* written at any time, including while we are in the middle
* of copying them.
*/
error = spa_reset_logs(spa);
*txg = spa_vdev_config_enter(spa);
if (error != 0) {
metaslab_group_activate(mg);
ASSERT(!vd->vdev_islog);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
return (error);
}
spa->spa_nonallocating_dspace += spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
vd->vdev_noalloc = B_TRUE;
return (0);
}
/*
* Turn off allocations for a top-level device from the pool.
*
* Turning off allocations for a top-level device can take a significant
* amount of time. As a result we use the spa_vdev_config_[enter/exit]
* functions which allow us to grab and release the spa_config_lock while
* still holding the namespace lock. During each step the configuration
* is synced out.
*/
int
spa_vdev_noalloc(spa_t *spa, uint64_t guid)
{
vdev_t *vd;
uint64_t txg;
int error = 0;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL)
error = SET_ERROR(ENOENT);
else if (vd->vdev_mg == NULL)
error = SET_ERROR(ZFS_ERR_VDEV_NOTSUP);
else if (!vd->vdev_noalloc)
error = vdev_passivate(vd, &txg);
if (error == 0) {
vdev_dirty_leaves(vd, VDD_DTL, txg);
vdev_config_dirty(vd);
}
error = spa_vdev_exit(spa, NULL, txg, error);
return (error);
}
int
spa_vdev_alloc(spa_t *spa, uint64_t guid)
{
vdev_t *vd;
uint64_t txg;
int error = 0;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL)
error = SET_ERROR(ENOENT);
else if (vd->vdev_mg == NULL)
error = SET_ERROR(ZFS_ERR_VDEV_NOTSUP);
else if (!vd->vdev_removing)
vdev_activate(vd);
if (error == 0) {
vdev_dirty_leaves(vd, VDD_DTL, txg);
vdev_config_dirty(vd);
}
(void) spa_vdev_exit(spa, NULL, txg, error);
return (error);
}
static void
spa_vdev_remove_aux(nvlist_t *config, const char *name, nvlist_t **dev,
int count, nvlist_t *dev_to_remove)
{
nvlist_t **newdev = NULL;
if (count > 1)
newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
for (int i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
}
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
fnvlist_add_nvlist_array(config, name, (const nvlist_t * const *)newdev,
count - 1);
for (int i = 0; i < count - 1; i++)
nvlist_free(newdev[i]);
if (count > 1)
kmem_free(newdev, (count - 1) * sizeof (void *));
}
static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
- svr->svr_allocd_segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ svr->svr_allocd_segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
svr->svr_vdev_id = vd->vdev_id;
for (int i = 0; i < TXG_SIZE; i++) {
- svr->svr_frees[i] = range_tree_create(NULL, RANGE_SEG64, NULL,
- 0, 0);
+ svr->svr_frees[i] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
list_create(&svr->svr_new_segments[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
return (svr);
}
void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]);
ASSERT0(svr->svr_max_offset_to_sync[i]);
- range_tree_destroy(svr->svr_frees[i]);
+ zfs_range_tree_destroy(svr->svr_frees[i]);
list_destroy(&svr->svr_new_segments[i]);
}
- range_tree_destroy(svr->svr_allocd_segs);
+ zfs_range_tree_destroy(svr->svr_allocd_segs);
mutex_destroy(&svr->svr_lock);
cv_destroy(&svr->svr_cv);
kmem_free(svr, sizeof (*svr));
}
/*
* This is called as a synctask in the txg in which we will mark this vdev
* as removing (in the config stored in the MOS).
*
* It begins the evacuation of a toplevel vdev by:
* - initializing the spa_removing_phys which tracks this removal
* - computing the amount of space to remove for accounting purposes
* - dirtying all dbufs in the spa_config_object
* - creating the spa_vdev_removal
* - starting the spa_vdev_remove_thread
*/
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
spa_vdev_removal_t *svr = NULL;
uint64_t txg __maybe_unused = dmu_tx_get_txg(tx);
ASSERT0(vdev_get_nparity(vd));
svr = spa_vdev_removal_create(vd);
ASSERT(vd->vdev_removing);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
/*
* By activating the OBSOLETE_COUNTS feature, we prevent
* the pool from being downgraded and ensure that the
* refcounts are precise.
*/
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
uint64_t one = 1;
VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
&one, tx));
boolean_t are_precise __maybe_unused;
ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
ASSERT3B(are_precise, ==, B_TRUE);
}
vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
vd->vdev_indirect_mapping =
vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
vd->vdev_indirect_births =
vdev_indirect_births_open(mos, vic->vic_births_object);
spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
spa->spa_removing_phys.sr_start_time = gethrestime_sec();
spa->spa_removing_phys.sr_end_time = 0;
spa->spa_removing_phys.sr_state = DSS_SCANNING;
spa->spa_removing_phys.sr_to_copy = 0;
spa->spa_removing_phys.sr_copied = 0;
/*
* Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
* there may be space in the defer tree, which is free, but still
* counted in vs_alloc.
*/
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *ms = vd->vdev_ms[i];
if (ms->ms_sm == NULL)
continue;
spa->spa_removing_phys.sr_to_copy +=
metaslab_allocated_space(ms);
/*
* Space which we are freeing this txg does not need to
* be copied.
*/
spa->spa_removing_phys.sr_to_copy -=
- range_tree_space(ms->ms_freeing);
+ zfs_range_tree_space(ms->ms_freeing);
- ASSERT0(range_tree_space(ms->ms_freed));
+ ASSERT0(zfs_range_tree_space(ms->ms_freed));
for (int t = 0; t < TXG_SIZE; t++)
- ASSERT0(range_tree_space(ms->ms_allocating[t]));
+ ASSERT0(zfs_range_tree_space(ms->ms_allocating[t]));
}
/*
* Sync tasks are called before metaslab_sync(), so there should
* be no already-synced metaslabs in the TXG_CLEAN list.
*/
ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
spa_sync_removing_state(spa, tx);
/*
* All blocks that we need to read the most recent mapping must be
* stored on concrete vdevs. Therefore, we must dirty anything that
* is read before spa_remove_init(). Specifically, the
* spa_config_object. (Note that although we already modified the
* spa_config_object in spa_sync_removing_state, that may not have
* modified all blocks of the object.)
*/
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
dmu_buf_t *dbuf;
VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
offset, FTAG, &dbuf, 0));
dmu_buf_will_dirty(dbuf, tx);
offset += dbuf->db_size;
dmu_buf_rele(dbuf, FTAG);
}
/*
* Now that we've allocated the im_object, dirty the vdev to ensure
* that the object gets written to the config on disk.
*/
vdev_config_dirty(vd);
zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
"im_obj=%llu", (u_longlong_t)vd->vdev_id, vd,
(u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)vic->vic_mapping_object);
spa_history_log_internal(spa, "vdev remove started", tx,
"%s vdev %llu %s", spa_name(spa), (u_longlong_t)vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
/*
* Setting spa_vdev_removal causes subsequent frees to call
* free_from_removing_vdev(). Note that we don't need any locking
* because we are the sync thread, and metaslab_free_impl() is only
* called from syncing context (potentially from a zio taskq thread,
* but in any case only when there are outstanding free i/os, which
* there are not).
*/
ASSERT3P(spa->spa_vdev_removal, ==, NULL);
spa->spa_vdev_removal = svr;
svr->svr_thread = thread_create(NULL, 0,
spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}
/*
* When we are opening a pool, we must read the mapping for each
* indirect vdev in order from most recently removed to least
* recently removed. We do this because the blocks for the mapping
* of older indirect vdevs may be stored on more recently removed vdevs.
* In order to read each indirect mapping object, we must have
* initialized all more recently removed vdevs.
*/
int
spa_remove_init(spa_t *spa)
{
int error;
error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_REMOVING, sizeof (uint64_t),
sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
&spa->spa_removing_phys);
if (error == ENOENT) {
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
return (0);
} else if (error != 0) {
return (error);
}
if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
/*
* We are currently removing a vdev. Create and
* initialize a spa_vdev_removal_t from the bonus
* buffer of the removing vdevs vdev_im_object, and
* initialize its partial mapping.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_t *vd = vdev_lookup_top(spa,
spa->spa_removing_phys.sr_removing_vdev);
if (vd == NULL) {
spa_config_exit(spa, SCL_STATE, FTAG);
return (EINVAL);
}
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
ASSERT(vdev_is_concrete(vd));
spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
ASSERT(vd->vdev_removing);
vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
spa->spa_meta_objset, vic->vic_mapping_object);
vd->vdev_indirect_births = vdev_indirect_births_open(
spa->spa_meta_objset, vic->vic_births_object);
spa_config_exit(spa, SCL_STATE, FTAG);
spa->spa_vdev_removal = svr;
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
uint64_t indirect_vdev_id =
spa->spa_removing_phys.sr_prev_indirect_vdev;
while (indirect_vdev_id != UINT64_MAX) {
vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
spa->spa_meta_objset, vic->vic_mapping_object);
vd->vdev_indirect_births = vdev_indirect_births_open(
spa->spa_meta_objset, vic->vic_births_object);
indirect_vdev_id = vic->vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_STATE, FTAG);
/*
* Now that we've loaded all the indirect mappings, we can allow
* reads from other blocks (e.g. via predictive prefetch).
*/
spa->spa_indirect_vdevs_loaded = B_TRUE;
return (0);
}
void
spa_restart_removal(spa_t *spa)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
if (svr == NULL)
return;
/*
* In general when this function is called there is no
* removal thread running. The only scenario where this
* is not true is during spa_import() where this function
* is called twice [once from spa_import_impl() and
* spa_async_resume()]. Thus, in the scenario where we
* import a pool that has an ongoing removal we don't
* want to spawn a second thread.
*/
if (svr->svr_thread != NULL)
return;
if (!spa_writeable(spa))
return;
zfs_dbgmsg("restarting removal of %llu",
(u_longlong_t)svr->svr_vdev_id);
svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
0, &p0, TS_RUN, minclsyspri);
}
/*
* Process freeing from a device which is in the middle of being removed.
* We must handle this carefully so that we attempt to copy freed data,
* and we correctly free already-copied data.
*/
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t txg = spa_syncing_txg(spa);
uint64_t max_offset_yet = 0;
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
vdev_indirect_mapping_object(vim));
ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);
mutex_enter(&svr->svr_lock);
/*
* Remove the segment from the removing vdev's spacemap. This
* ensures that we will not attempt to copy this space (if the
* removal thread has not yet visited it), and also ensures
* that we know what is actually allocated on the new vdevs
* (needed if we cancel the removal).
*
* Note: we must do the metaslab_free_concrete() with the svr_lock
* held, so that the remove_thread can not load this metaslab and then
* visit this offset between the time that we metaslab_free_concrete()
* and when we check to see if it has been visited.
*
* Note: The checkpoint flag is set to false as having/taking
* a checkpoint and removing a device can't happen at the same
* time.
*/
ASSERT(!spa_has_checkpoint(spa));
metaslab_free_concrete(vd, offset, size, B_FALSE);
uint64_t synced_size = 0;
uint64_t synced_offset = 0;
uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
if (offset < max_offset_synced) {
/*
* The mapping for this offset is already on disk.
* Free from the new location.
*
* Note that we use svr_max_synced_offset because it is
* updated atomically with respect to the in-core mapping.
* By contrast, vim_max_offset is not.
*
* This block may be split between a synced entry and an
* in-flight or unvisited entry. Only process the synced
* portion of it here.
*/
synced_size = MIN(size, max_offset_synced - offset);
synced_offset = offset;
ASSERT3U(max_offset_yet, <=, max_offset_synced);
max_offset_yet = max_offset_synced;
DTRACE_PROBE3(remove__free__synced,
spa_t *, spa,
uint64_t, offset,
uint64_t, synced_size);
size -= synced_size;
offset += synced_size;
}
/*
* Look at all in-flight txgs starting from the currently syncing one
* and see if a section of this free is being copied. By starting from
* this txg and iterating forward, we might find that this region
* was copied in two different txgs and handle it appropriately.
*/
for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
int txgoff = (txg + i) & TXG_MASK;
if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
/*
* The mapping for this offset is in flight, and
* will be synced in txg+i.
*/
uint64_t inflight_size = MIN(size,
svr->svr_max_offset_to_sync[txgoff] - offset);
DTRACE_PROBE4(remove__free__inflight,
spa_t *, spa,
uint64_t, offset,
uint64_t, inflight_size,
uint64_t, txg + i);
/*
* We copy data in order of increasing offset.
* Therefore the max_offset_to_sync[] must increase
* (or be zero, indicating that nothing is being
* copied in that txg).
*/
if (svr->svr_max_offset_to_sync[txgoff] != 0) {
ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
>=, max_offset_yet);
max_offset_yet =
svr->svr_max_offset_to_sync[txgoff];
}
/*
* We've already committed to copying this segment:
* we have allocated space elsewhere in the pool for
* it and have an IO outstanding to copy the data. We
* cannot free the space before the copy has
* completed, or else the copy IO might overwrite any
* new data. To free that space, we record the
* segment in the appropriate svr_frees tree and free
* the mapped space later, in the txg where we have
* completed the copy and synced the mapping (see
* vdev_mapping_sync).
*/
- range_tree_add(svr->svr_frees[txgoff],
+ zfs_range_tree_add(svr->svr_frees[txgoff],
offset, inflight_size);
size -= inflight_size;
offset += inflight_size;
/*
* This space is already accounted for as being
* done, because it is being copied in txg+i.
* However, if i!=0, then it is being copied in
* a future txg. If we crash after this txg
* syncs but before txg+i syncs, then the space
* will be free. Therefore we must account
* for the space being done in *this* txg
* (when it is freed) rather than the future txg
* (when it will be copied).
*/
ASSERT3U(svr->svr_bytes_done[txgoff], >=,
inflight_size);
svr->svr_bytes_done[txgoff] -= inflight_size;
svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
}
}
ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
if (size > 0) {
/*
* The copy thread has not yet visited this offset. Ensure
* that it doesn't.
*/
DTRACE_PROBE3(remove__free__unvisited,
spa_t *, spa,
uint64_t, offset,
uint64_t, size);
if (svr->svr_allocd_segs != NULL)
- range_tree_clear(svr->svr_allocd_segs, offset, size);
+ zfs_range_tree_clear(svr->svr_allocd_segs, offset,
+ size);
/*
* Since we now do not need to copy this data, for
* accounting purposes we have done our job and can count
* it as completed.
*/
svr->svr_bytes_done[txg & TXG_MASK] += size;
}
mutex_exit(&svr->svr_lock);
/*
* Now that we have dropped svr_lock, process the synced portion
* of this free.
*/
if (synced_size > 0) {
vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
/*
* Note: this can only be called from syncing context,
* and the vdev_indirect_mapping is only changed from the
* sync thread, so we don't need svr_lock while doing
* metaslab_free_impl_cb.
*/
boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
metaslab_free_impl_cb, &checkpoint);
}
}
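/*
 * Illustrative sketch (not part of this change) of how
 * free_from_removing_vdev() above splits a freed extent at the boundary of
 * the already-synced mapping: the portion below max_offset_synced is freed
 * from the new location, and the remainder is handled as in-flight or
 * unvisited space. Names and values here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t offset = 900, size = 300;	/* freed extent [900, 1200) */
	uint64_t max_offset_synced = 1000;	/* mapping synced up to here */

	uint64_t synced_size = 0;
	if (offset < max_offset_synced) {
		synced_size = size < max_offset_synced - offset ?
		    size : max_offset_synced - offset;
		offset += synced_size;
		size -= synced_size;
	}

	/* Prints: synced 100 bytes, remaining 200 bytes at offset 1000. */
	printf("synced %llu bytes, remaining %llu bytes at offset %llu\n",
	    (unsigned long long)synced_size, (unsigned long long)size,
	    (unsigned long long)offset);
	return (0);
}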
/*
* Stop an active removal and update the spa_removing phys.
*/
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
/* Ensure the removal thread has completed before we free the svr. */
spa_vdev_remove_suspend(spa);
ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
if (state == DSS_FINISHED) {
spa_removing_phys_t *srp = &spa->spa_removing_phys;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (srp->sr_prev_indirect_vdev != -1) {
vdev_t *pvd;
pvd = vdev_lookup_top(spa,
srp->sr_prev_indirect_vdev);
ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
}
vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
srp->sr_prev_indirect_vdev = vd->vdev_id;
}
spa->spa_removing_phys.sr_state = state;
spa->spa_removing_phys.sr_end_time = gethrestime_sec();
spa->spa_vdev_removal = NULL;
spa_vdev_removal_destroy(svr);
spa_sync_removing_state(spa, tx);
spa_notify_waiters(spa);
vdev_config_dirty(spa->spa_root_vdev);
}
static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_mark_obsolete(vd, offset, size);
boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
}
/*
* On behalf of the removal thread, syncs an incremental bit more of
* the indirect mapping to disk and updates the in-memory mapping.
* Called as a sync task in every txg that the removal thread makes progress.
*/
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
uint64_t txg = dmu_tx_get_txg(tx);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
vdev_indirect_mapping_add_entries(vim,
&svr->svr_new_segments[txg & TXG_MASK], tx);
vdev_indirect_births_add_entry(vd->vdev_indirect_births,
vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
/*
* Free the copied data for anything that was freed while the
* mapping entries were in flight.
*/
mutex_enter(&svr->svr_lock);
- range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
+ zfs_range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
free_mapped_segment_cb, vd);
ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
vdev_indirect_mapping_max_offset(vim));
svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
mutex_exit(&svr->svr_lock);
spa_sync_removing_state(spa, tx);
}
typedef struct vdev_copy_segment_arg {
spa_t *vcsa_spa;
dva_t *vcsa_dest_dva;
uint64_t vcsa_txg;
- range_tree_t *vcsa_obsolete_segs;
+ zfs_range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;
static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
vdev_copy_segment_arg_t *vcsa = arg;
spa_t *spa = vcsa->vcsa_spa;
blkptr_t bp = { { { {0} } } };
BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(&bp, size);
BP_SET_PSIZE(&bp, size);
BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(&bp, DMU_OT_NONE);
BP_SET_LEVEL(&bp, 0);
BP_SET_DEDUP(&bp, 0);
BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
DVA_SET_OFFSET(&bp.blk_dva[0],
DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
DVA_SET_ASIZE(&bp.blk_dva[0], size);
zio_free(spa, vcsa->vcsa_txg, &bp);
}
/*
* All reads and writes associated with a call to spa_vdev_copy_segment()
* are done.
*/
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
vdev_copy_segment_arg_t *vcsa = zio->io_private;
- range_tree_vacate(vcsa->vcsa_obsolete_segs,
+ zfs_range_tree_vacate(vcsa->vcsa_obsolete_segs,
unalloc_seg, vcsa);
- range_tree_destroy(vcsa->vcsa_obsolete_segs);
+ zfs_range_tree_destroy(vcsa->vcsa_obsolete_segs);
kmem_free(vcsa, sizeof (*vcsa));
spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}
/*
* The write of the new location is done.
*/
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
vdev_copy_arg_t *vca = zio->io_private;
abd_free(zio->io_abd);
mutex_enter(&vca->vca_lock);
vca->vca_outstanding_bytes -= zio->io_size;
if (zio->io_error != 0)
vca->vca_write_error_bytes += zio->io_size;
cv_signal(&vca->vca_cv);
mutex_exit(&vca->vca_lock);
}
/*
* The read of the old location is done. The parent zio is the write to
* the new location. Allow it to start.
*/
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
vdev_copy_arg_t *vca = zio->io_private;
if (zio->io_error != 0) {
mutex_enter(&vca->vca_lock);
vca->vca_read_error_bytes += zio->io_size;
mutex_exit(&vca->vca_lock);
}
zio_nowait(zio_unique_parent(zio));
}
/*
* If the old and new vdevs are mirrors, we will read both sides of the old
* mirror, and write each copy to the corresponding side of the new mirror.
* If the old and new vdevs have a different number of children, we will do
* this as best as possible. Since we aren't verifying checksums, this
* ensures that as long as there's a good copy of the data, we'll have a
* good copy after the removal, even if there's silent damage to one side
* of the mirror. If we're removing a mirror that has some silent damage,
* we'll have exactly the same damage in the new location (assuming that
* the new location is also a mirror).
*
* We accomplish this by creating a tree of zio_t's, with as many writes as
* there are "children" of the new vdev (a non-redundant vdev counts as one
* child, a 2-way mirror has 2 children, etc). Each write has an associated
* read from a child of the old vdev. Typically there will be the same
* number of children of the old and new vdevs. However, if there are more
* children of the new vdev, some child(ren) of the old vdev will be issued
* multiple reads. If there are more children of the old vdev, some copies
* will be dropped.
*
* For example, the tree of zio_t's for a 2-way mirror is:
*
* null
* / \
* write(new vdev, child 0) write(new vdev, child 1)
* | |
* read(old vdev, child 0) read(old vdev, child 1)
*
* Child zio's complete before their parents complete. However, zio's
* created with zio_vdev_child_io() may be issued before their children
* complete. In this case we need to make sure that the children (reads)
* complete before the parents (writes) are *issued*. We do this by not
* calling zio_nowait() on each write until its corresponding read has
* completed.
*
* The spa_config_lock must be held while zio's created by
* zio_vdev_child_io() are in progress, to ensure that the vdev tree does
* not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
* zio is needed to release the spa_config_lock after all the reads and
* writes complete. (Note that we can't grab the config lock for each read,
* because it is not reentrant - we could deadlock with a thread waiting
* for a write lock.)
*/
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
vdev_t *source_vd, uint64_t source_offset,
vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);
/*
* If the destination child is unwritable then there is no point
* in issuing the source reads which cannot be written.
*/
if (!vdev_writeable(dest_child_vd))
return;
mutex_enter(&vca->vca_lock);
vca->vca_outstanding_bytes += size;
mutex_exit(&vca->vca_lock);
abd_t *abd = abd_alloc_for_io(size, B_FALSE);
vdev_t *source_child_vd = NULL;
if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
/*
* Source and dest are both mirrors. Copy from the same
* child id as we are copying to (wrapping around if there
* are more dest children than source children). If the
* preferred source child is unreadable select another.
*/
for (int i = 0; i < source_vd->vdev_children; i++) {
source_child_vd = source_vd->vdev_child[
(dest_id + i) % source_vd->vdev_children];
if (vdev_readable(source_child_vd))
break;
}
} else {
source_child_vd = source_vd;
}
/*
* There should always be at least one readable source child or
* the pool would be in a suspended state. If an unreadable child were
* somehow selected it would result in IO errors, the removal process
* being cancelled, and the pool reverting to its pre-removal state.
*/
ASSERT3P(source_child_vd, !=, NULL);
zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
dest_child_vd, dest_offset, abd, size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL,
spa_vdev_copy_segment_write_done, vca);
zio_nowait(zio_vdev_child_io(write_zio, NULL,
source_child_vd, source_offset, abd, size,
ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL,
spa_vdev_copy_segment_read_done, vca));
}
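/*
 * Standalone sketch (not part of this change) of the mirror-to-mirror child
 * pairing used by spa_vdev_copy_one_child() above: read from the source
 * child with the same index as the destination child, wrapping around when
 * the destination mirror has more children, and skipping unreadable source
 * children. Names below are illustrative only.
 */
#include <stdio.h>

/* Pick the source child index for a given destination child. */
static int
pick_source_child(int dest_id, int source_children, const int *source_readable)
{
	for (int i = 0; i < source_children; i++) {
		int candidate = (dest_id + i) % source_children;
		if (source_readable[candidate])
			return (candidate);
	}
	return (-1);	/* no readable source child; pool would be suspended */
}

int
main(void)
{
	/* 2-way source mirror whose child 1 is unreadable, 3-way destination. */
	int readable[2] = { 1, 0 };
	for (int dest = 0; dest < 3; dest++)
		printf("dest child %d reads from source child %d\n",
		    dest, pick_source_child(dest, 2, readable));
	return (0);
}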
/*
* Allocate a new location for this segment, and create the zio_t's to
* read from the old location and write to the new location.
*/
static int
-spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
+spa_vdev_copy_segment(vdev_t *vd, zfs_range_tree_t *segs,
uint64_t maxalloc, uint64_t txg,
vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_entry_t *entry;
dva_t dst = {{ 0 }};
- uint64_t start = range_tree_min(segs);
+ uint64_t start = zfs_range_tree_min(segs);
ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift));
ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift));
- uint64_t size = range_tree_span(segs);
- if (range_tree_span(segs) > maxalloc) {
+ uint64_t size = zfs_range_tree_span(segs);
+ if (zfs_range_tree_span(segs) > maxalloc) {
/*
* We can't allocate all the segments. Prefer to end
* the allocation at the end of a segment, thus avoiding
* additional split blocks.
*/
- range_seg_max_t search;
+ zfs_range_seg_max_t search;
zfs_btree_index_t where;
- rs_set_start(&search, segs, start + maxalloc);
- rs_set_end(&search, segs, start + maxalloc);
+ zfs_rs_set_start(&search, segs, start + maxalloc);
+ zfs_rs_set_end(&search, segs, start + maxalloc);
(void) zfs_btree_find(&segs->rt_root, &search, &where);
- range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
+ zfs_range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
&where);
if (rs != NULL) {
- size = rs_get_end(rs, segs) - start;
+ size = zfs_rs_get_end(rs, segs) - start;
} else {
/*
* There are no segments that end before maxalloc.
* I.e. the first segment is larger than maxalloc,
* so we must split it.
*/
size = maxalloc;
}
}
ASSERT3U(size, <=, maxalloc);
ASSERT0(P2PHASE(size, 1 << spa->spa_min_ashift));
/*
* An allocation class might not have any remaining vdevs or space
*/
metaslab_class_t *mc = mg->mg_class;
if (mc->mc_groups == 0)
mc = spa_normal_class(spa);
int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg,
METASLAB_DONT_THROTTLE, zal, 0);
if (error == ENOSPC && mc != spa_normal_class(spa)) {
error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
&dst, 0, NULL, txg, METASLAB_DONT_THROTTLE, zal, 0);
}
if (error != 0)
return (error);
/*
* Determine the ranges that are not actually needed. Offsets are
* relative to the start of the range to be copied (i.e. relative to the
* local variable "start").
*/
- range_tree_t *obsolete_segs = range_tree_create(NULL, RANGE_SEG64, NULL,
- 0, 0);
+ zfs_range_tree_t *obsolete_segs = zfs_range_tree_create(NULL,
+ ZFS_RANGE_SEG64, NULL, 0, 0);
zfs_btree_index_t where;
- range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
- ASSERT3U(rs_get_start(rs, segs), ==, start);
- uint64_t prev_seg_end = rs_get_end(rs, segs);
+ zfs_range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
+ ASSERT3U(zfs_rs_get_start(rs, segs), ==, start);
+ uint64_t prev_seg_end = zfs_rs_get_end(rs, segs);
while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) {
- if (rs_get_start(rs, segs) >= start + size) {
+ if (zfs_rs_get_start(rs, segs) >= start + size) {
break;
} else {
- range_tree_add(obsolete_segs,
+ zfs_range_tree_add(obsolete_segs,
prev_seg_end - start,
- rs_get_start(rs, segs) - prev_seg_end);
+ zfs_rs_get_start(rs, segs) - prev_seg_end);
}
- prev_seg_end = rs_get_end(rs, segs);
+ prev_seg_end = zfs_rs_get_end(rs, segs);
}
/* We don't end in the middle of an obsolete range */
ASSERT3U(start + size, <=, prev_seg_end);
- range_tree_clear(segs, start, size);
+ zfs_range_tree_clear(segs, start, size);
/*
* We can't have any padding of the allocated size, otherwise we will
* misunderstand what's allocated, and the size of the mapping. We
* prevent padding by ensuring that all devices in the pool have the
* same ashift, and the allocation size is a multiple of the ashift.
*/
VERIFY3U(DVA_GET_ASIZE(&dst), ==, size);
entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
entry->vime_mapping.vimep_dst = dst;
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
- entry->vime_obsolete_count = range_tree_space(obsolete_segs);
+ entry->vime_obsolete_count =
+ zfs_range_tree_space(obsolete_segs);
}
vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
vcsa->vcsa_obsolete_segs = obsolete_segs;
vcsa->vcsa_spa = spa;
vcsa->vcsa_txg = txg;
/*
* See comment before spa_vdev_copy_one_child().
*/
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
spa_vdev_copy_segment_done, vcsa, 0);
vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
if (dest_vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < dest_vd->vdev_children; i++) {
vdev_t *child = dest_vd->vdev_child[i];
spa_vdev_copy_one_child(vca, nzio, vd, start,
child, DVA_GET_OFFSET(&dst), i, size);
}
} else {
spa_vdev_copy_one_child(vca, nzio, vd, start,
dest_vd, DVA_GET_OFFSET(&dst), -1, size);
}
zio_nowait(nzio);
list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
vdev_dirty(vd, 0, NULL, txg);
return (0);
}
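/*
 * Illustrative sketch (not part of this change) of the sizing decision in
 * spa_vdev_copy_segment() above: when the span of the segments exceeds
 * maxalloc, prefer to stop at the end of the last segment that finishes
 * within maxalloc so blocks are not split; only if the first segment itself
 * is larger than maxalloc is it split. A plain sorted array stands in for
 * the btree-backed range tree.
 */
#include <stdint.h>
#include <stdio.h>

/* Segment ends (exclusive), sorted ascending, relative to 'start'. */
static uint64_t
choose_copy_size(const uint64_t *seg_ends, int nsegs, uint64_t maxalloc)
{
	uint64_t size = maxalloc;	/* fall back to splitting a segment */
	for (int i = nsegs - 1; i >= 0; i--) {
		if (seg_ends[i] <= maxalloc) {
			size = seg_ends[i];	/* end on a segment boundary */
			break;
		}
	}
	return (size);
}

int
main(void)
{
	uint64_t seg_ends[] = { 4096, 12288, 20480 };
	/* With maxalloc 16384, stop at 12288 rather than split a segment. */
	printf("%llu\n",
	    (unsigned long long)choose_copy_size(seg_ends, 3, 16384));
	return (0);
}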
/*
* Complete the removal of a toplevel vdev. This is called as a
* synctask in the same txg that we will sync out the new config (to the
* MOS object) which indicates that this vdev is indirect.
*/
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]);
}
ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
spa->spa_removing_phys.sr_to_copy);
vdev_destroy_spacemaps(vd, tx);
/* destroy leaf zaps, if any */
ASSERT3P(svr->svr_zaplist, !=, NULL);
for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
}
fnvlist_free(svr->svr_zaplist);
spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
/* vd->vdev_path is not available here */
spa_history_log_internal(spa, "vdev remove completed", tx,
"%s vdev %llu", spa_name(spa), (u_longlong_t)vd->vdev_id);
}
static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
ASSERT3P(zlist, !=, NULL);
ASSERT0(vdev_get_nparity(vd));
if (vd->vdev_leaf_zap != 0) {
char zkey[32];
(void) snprintf(zkey, sizeof (zkey), "%s-%llu",
VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
}
for (uint64_t id = 0; id < vd->vdev_children; id++) {
vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
}
}
static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
vdev_t *ivd;
dmu_tx_t *tx;
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
/*
* First, build a list of leaf zaps to be destroyed.
* This is passed to the sync context thread,
* which does the actual unlinking.
*/
svr->svr_zaplist = fnvlist_alloc();
vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
ivd = vdev_add_parent(vd, &vdev_indirect_ops);
ivd->vdev_removing = 0;
vd->vdev_leaf_zap = 0;
vdev_remove_child(ivd, vd);
vdev_compact_children(ivd);
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
mutex_enter(&svr->svr_lock);
svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock);
/* After this, we can not use svr. */
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool,
vdev_remove_complete_sync, svr, tx);
dmu_tx_commit(tx);
}
/*
* Complete the removal of a toplevel vdev. This is called in open
* context by the removal thread after we have copied all of the vdev's data.
*/
static void
vdev_remove_complete(spa_t *spa)
{
uint64_t txg;
/*
* Wait for any deferred frees to be synced before we call
* vdev_metaslab_fini()
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
txg = spa_vdev_enter(spa);
vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
vdev_rebuild_stop_wait(vd);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
uint64_t vdev_space = spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)txg);
ASSERT3U(0, !=, vdev_space);
ASSERT3U(spa->spa_nonallocating_dspace, >=, vdev_space);
/* the vdev is no longer part of the dspace */
spa->spa_nonallocating_dspace -= vdev_space;
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
vdev_remove_replace_with_indirect(vd, txg);
/*
* We now release the locks, allowing spa_sync to run and finish the
* removal via vdev_remove_complete_sync in syncing context.
*
* Note that we hold on to the vdev_t that has been replaced. Since
* it isn't part of the vdev tree any longer, it can't be concurrently
* manipulated, even while we don't have the config lock.
*/
(void) spa_vdev_exit(spa, NULL, txg, 0);
/*
* Top ZAP should have been transferred to the indirect vdev in
* vdev_remove_replace_with_indirect.
*/
ASSERT0(vd->vdev_top_zap);
/*
* Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
*/
ASSERT0(vd->vdev_leaf_zap);
txg = spa_vdev_enter(spa);
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Request to update the config and the config cachefile.
*/
vdev_config_dirty(spa->spa_root_vdev);
(void) spa_vdev_exit(spa, vd, txg, 0);
if (ev != NULL)
spa_event_post(ev);
}
/*
* Evacuates a segment of size at most max_alloc from the vdev
* via repeated calls to spa_vdev_copy_segment. If an allocation
* fails, the pool is probably too fragmented to handle such a
* large size, so decrease max_alloc so that the caller will not try
* this size again this txg.
*/
static void
spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
uint64_t *max_alloc, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
mutex_enter(&svr->svr_lock);
/*
* Determine how big of a chunk to copy. We can allocate up
* to max_alloc bytes, and we can span up to vdev_removal_max_span
* bytes of unallocated space at a time. "segs" will track the
* allocated segments that we are copying. We may also be copying
* free segments (of up to vdev_removal_max_span bytes).
*/
- range_tree_t *segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ zfs_range_tree_t *segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
for (;;) {
- range_tree_t *rt = svr->svr_allocd_segs;
- range_seg_t *rs = range_tree_first(rt);
+ zfs_range_tree_t *rt = svr->svr_allocd_segs;
+ zfs_range_seg_t *rs = zfs_range_tree_first(rt);
if (rs == NULL)
break;
uint64_t seg_length;
- if (range_tree_is_empty(segs)) {
+ if (zfs_range_tree_is_empty(segs)) {
/* need to truncate the first seg based on max_alloc */
- seg_length = MIN(rs_get_end(rs, rt) - rs_get_start(rs,
- rt), *max_alloc);
+ seg_length = MIN(zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt), *max_alloc);
} else {
- if (rs_get_start(rs, rt) - range_tree_max(segs) >
- vdev_removal_max_span) {
+ if (zfs_rs_get_start(rs, rt) - zfs_range_tree_max(segs)
+ > vdev_removal_max_span) {
/*
* Including this segment would cause us to
* copy a larger unneeded chunk than is allowed.
*/
break;
- } else if (rs_get_end(rs, rt) - range_tree_min(segs) >
- *max_alloc) {
+ } else if (zfs_rs_get_end(rs, rt) -
+ zfs_range_tree_min(segs) > *max_alloc) {
/*
* This additional segment would extend past
* max_alloc. Rather than splitting this
* segment, leave it for the next mapping.
*/
break;
} else {
- seg_length = rs_get_end(rs, rt) -
- rs_get_start(rs, rt);
+ seg_length = zfs_rs_get_end(rs, rt) -
+ zfs_rs_get_start(rs, rt);
}
}
- range_tree_add(segs, rs_get_start(rs, rt), seg_length);
- range_tree_remove(svr->svr_allocd_segs,
- rs_get_start(rs, rt), seg_length);
+ zfs_range_tree_add(segs, zfs_rs_get_start(rs, rt), seg_length);
+ zfs_range_tree_remove(svr->svr_allocd_segs,
+ zfs_rs_get_start(rs, rt), seg_length);
}
- if (range_tree_is_empty(segs)) {
+ if (zfs_range_tree_is_empty(segs)) {
mutex_exit(&svr->svr_lock);
- range_tree_destroy(segs);
+ zfs_range_tree_destroy(segs);
return;
}
if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
svr, tx);
}
- svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
+ svr->svr_max_offset_to_sync[txg & TXG_MASK] = zfs_range_tree_max(segs);
/*
* Note: this is the amount of *allocated* space
* that we are taking care of each txg.
*/
- svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
+ svr->svr_bytes_done[txg & TXG_MASK] += zfs_range_tree_space(segs);
mutex_exit(&svr->svr_lock);
zio_alloc_list_t zal;
metaslab_trace_init(&zal);
uint64_t thismax = SPA_MAXBLOCKSIZE;
- while (!range_tree_is_empty(segs)) {
+ while (!zfs_range_tree_is_empty(segs)) {
int error = spa_vdev_copy_segment(vd,
segs, thismax, txg, vca, &zal);
if (error == ENOSPC) {
/*
* Cut our segment in half, and don't try this
* segment size again this txg. Note that the
* allocation size must be aligned to the highest
* ashift in the pool, so that the allocation will
* not be padded out to a multiple of the ashift,
* which could cause us to think that this mapping
* is larger than we intended.
*/
ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
uint64_t attempted =
- MIN(range_tree_span(segs), thismax);
+ MIN(zfs_range_tree_span(segs), thismax);
thismax = P2ROUNDUP(attempted / 2,
1 << spa->spa_max_ashift);
/*
* The minimum-size allocation cannot fail.
*/
ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
*max_alloc = attempted - (1 << spa->spa_max_ashift);
} else {
ASSERT0(error);
/*
* We've performed an allocation, so reset the
* alloc trace list.
*/
metaslab_trace_fini(&zal);
metaslab_trace_init(&zal);
}
}
metaslab_trace_fini(&zal);
- range_tree_destroy(segs);
+ zfs_range_tree_destroy(segs);
}
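For reference, the chunk-selection rule in spa_vdev_copy_impl() above boils down to two break conditions: stop when the gap to the next allocated segment exceeds vdev_removal_max_span, or when the accumulated span would exceed max_alloc. A minimal standalone sketch with hypothetical segment values (the truncation of the first segment against max_alloc is omitted for brevity):
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t start, end; } seg_t;

int
main(void)
{
	/* Hypothetical allocated segments, sorted by offset. */
	seg_t segs[] = { { 0, 16 }, { 24, 40 }, { 200, 216 }, { 220, 260 } };
	const uint64_t max_alloc = 128;	/* cap on total span copied */
	const uint64_t max_span = 32;	/* cap on free space skipped over */
	uint64_t chunk_start = segs[0].start;
	uint64_t chunk_end = segs[0].end;

	for (size_t i = 1; i < sizeof (segs) / sizeof (segs[0]); i++) {
		if (segs[i].start - chunk_end > max_span)
			break;	/* too much unallocated space to copy */
		if (segs[i].end - chunk_start > max_alloc)
			break;	/* chunk would exceed max_alloc */
		chunk_end = segs[i].end;
	}
	printf("copy chunk: [%llu, %llu)\n",
	    (unsigned long long)chunk_start, (unsigned long long)chunk_end);
	return (0);
}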
/*
* The size of each removal mapping is limited by the tunable
* zfs_remove_max_segment, but we must adjust this to be a multiple of the
* pool's ashift, so that we don't try to split individual sectors regardless
* of the tunable value. (Note that device removal requires that all devices
* have the same ashift, so there's no difference between spa_min_ashift and
* spa_max_ashift.) The raw tunable should not be used elsewhere.
*/
uint64_t
spa_remove_max_segment(spa_t *spa)
{
return (P2ROUNDUP(zfs_remove_max_segment, 1 << spa->spa_max_ashift));
}
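The rounding described above is just a power-of-two round-up of the tunable to the pool's allocation granularity. A plain-C equivalent of the P2ROUNDUP idiom, using hypothetical values (an ashift of 12, i.e. 4 KiB sectors):
#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of align; align must be a power of two. */
static uint64_t
p2roundup(uint64_t x, uint64_t align)
{
	return ((x + align - 1) & ~(align - 1));
}

int
main(void)
{
	uint64_t tunable = 16 * 1024 * 1024 + 100;	/* not sector-aligned */
	uint64_t ashift = 12;
	printf("%llu -> %llu\n", (unsigned long long)tunable,
	    (unsigned long long)p2roundup(tunable, 1ULL << ashift));
	return (0);
}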
/*
* The removal thread operates in open context. It iterates over all
* allocated space in the vdev, by loading each metaslab's spacemap.
* For each contiguous segment of allocated space (capping the segment
* size at SPA_MAXBLOCKSIZE), we:
* - Allocate space for it on another vdev.
* - Create a new mapping from the old location to the new location
* (as a record in svr_new_segments).
* - Initiate a physical read zio to get the data off the removing disk.
* - In the read zio's done callback, initiate a physical write zio to
* write it to the new vdev.
* Note that all of this will take effect when a particular TXG syncs.
* The sync thread ensures that all the phys reads and writes for the syncing
* TXG have completed (see spa_txg_zio) and writes the new mappings to disk
* (see vdev_mapping_sync()).
*/
static __attribute__((noreturn)) void
spa_vdev_remove_thread(void *arg)
{
spa_t *spa = arg;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_copy_arg_t vca;
uint64_t max_alloc = spa_remove_max_segment(spa);
uint64_t last_txg = 0;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_removing);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT(vim != NULL);
mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
vca.vca_outstanding_bytes = 0;
vca.vca_read_error_bytes = 0;
vca.vca_write_error_bytes = 0;
mutex_enter(&svr->svr_lock);
/*
* Start from vim_max_offset so we pick up where we left off
* if we are restarting the removal after opening the pool.
*/
uint64_t msi;
for (msi = start_offset >> vd->vdev_ms_shift;
msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT3U(msi, <=, vd->vdev_ms_count);
- ASSERT0(range_tree_space(svr->svr_allocd_segs));
+ ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Assert nothing in flight -- ms_*tree is empty.
*/
for (int i = 0; i < TXG_SIZE; i++) {
- ASSERT0(range_tree_space(msp->ms_allocating[i]));
+ ASSERT0(zfs_range_tree_space(msp->ms_allocating[i]));
}
/*
* If the metaslab has ever been allocated from (ms_sm!=NULL),
* read the allocated segments from the space map object
* into svr_allocd_segs. Since we do this while holding
* svr_lock and ms_sync_lock, concurrent frees (which
* would have modified the space map) will wait for us
* to finish loading the spacemap, and then take the
* appropriate action (see free_from_removing_vdev()).
*/
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
- range_tree_walk(msp->ms_unflushed_allocs,
- range_tree_add, svr->svr_allocd_segs);
- range_tree_walk(msp->ms_unflushed_frees,
- range_tree_remove, svr->svr_allocd_segs);
- range_tree_walk(msp->ms_freeing,
- range_tree_remove, svr->svr_allocd_segs);
+ zfs_range_tree_walk(msp->ms_unflushed_allocs,
+ zfs_range_tree_add, svr->svr_allocd_segs);
+ zfs_range_tree_walk(msp->ms_unflushed_frees,
+ zfs_range_tree_remove, svr->svr_allocd_segs);
+ zfs_range_tree_walk(msp->ms_freeing,
+ zfs_range_tree_remove, svr->svr_allocd_segs);
/*
* When we are resuming from a paused removal (i.e.
* when importing a pool with a removal in progress),
* discard any state that we have already processed.
*/
- range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
+ zfs_range_tree_clear(svr->svr_allocd_segs, 0,
+ start_offset);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock);
vca.vca_msp = msp;
zfs_dbgmsg("copying %llu segments for metaslab %llu",
(u_longlong_t)zfs_btree_numnodes(
&svr->svr_allocd_segs->rt_root),
(u_longlong_t)msp->ms_id);
while (!svr->svr_thread_exit &&
- !range_tree_is_empty(svr->svr_allocd_segs)) {
+ !zfs_range_tree_is_empty(svr->svr_allocd_segs)) {
mutex_exit(&svr->svr_lock);
/*
* We need to periodically drop the config lock so that
* writers can get in. Additionally, we can't wait
* for a txg to sync while holding a config lock
* (since a waiting writer could cause a 3-way deadlock
* with the sync thread, which also gets a config
* lock for reader). So we can't hold the config lock
* while calling dmu_tx_assign().
*/
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* This delay will pause the removal around the point
* specified by zfs_removal_suspend_progress. We do this
* solely from the test suite or during debugging.
*/
while (zfs_removal_suspend_progress &&
!svr->svr_thread_exit)
delay(hz);
mutex_enter(&vca.vca_lock);
while (vca.vca_outstanding_bytes >
zfs_remove_max_copy_bytes) {
cv_wait(&vca.vca_cv, &vca.vca_lock);
}
mutex_exit(&vca.vca_lock);
dmu_tx_t *tx =
dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
/*
* Reacquire the vdev_config lock. The vdev_t
* that we're removing may have changed, e.g. due
* to a vdev_attach or vdev_detach.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd = vdev_lookup_top(spa, svr->svr_vdev_id);
if (txg != last_txg)
max_alloc = spa_remove_max_segment(spa);
last_txg = txg;
spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);
dmu_tx_commit(tx);
mutex_enter(&svr->svr_lock);
}
mutex_enter(&vca.vca_lock);
if (zfs_removal_ignore_errors == 0 &&
(vca.vca_read_error_bytes > 0 ||
vca.vca_write_error_bytes > 0)) {
svr->svr_thread_exit = B_TRUE;
}
mutex_exit(&vca.vca_lock);
}
mutex_exit(&svr->svr_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* Wait for all copies to finish before cleaning up the vca.
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
ASSERT0(vca.vca_outstanding_bytes);
mutex_destroy(&vca.vca_lock);
cv_destroy(&vca.vca_cv);
if (svr->svr_thread_exit) {
mutex_enter(&svr->svr_lock);
- range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
+ zfs_range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock);
/*
* During the removal process an unrecoverable read or write
* error was encountered. The removal process must be
* cancelled or this damage may become permanent.
*/
if (zfs_removal_ignore_errors == 0 &&
(vca.vca_read_error_bytes > 0 ||
vca.vca_write_error_bytes > 0)) {
zfs_dbgmsg("canceling removal due to IO errors: "
"[read_error_bytes=%llu] [write_error_bytes=%llu]",
(u_longlong_t)vca.vca_read_error_bytes,
(u_longlong_t)vca.vca_write_error_bytes);
spa_vdev_remove_cancel_impl(spa);
}
} else {
- ASSERT0(range_tree_space(svr->svr_allocd_segs));
+ ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
vdev_remove_complete(spa);
}
thread_exit();
}
void
spa_vdev_remove_suspend(spa_t *spa)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
if (svr == NULL)
return;
mutex_enter(&svr->svr_lock);
svr->svr_thread_exit = B_TRUE;
while (svr->svr_thread != NULL)
cv_wait(&svr->svr_cv, &svr->svr_lock);
svr->svr_thread_exit = B_FALSE;
mutex_exit(&svr->svr_lock);
}
/*
* Return true if the "allocating" property has been set to "off"
*/
static boolean_t
vdev_prop_allocating_off(vdev_t *vd)
{
uint64_t objid = vd->vdev_top_zap;
uint64_t allocating = 1;
/* no vdev property object => no props */
if (objid != 0) {
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
mutex_enter(&spa->spa_props_lock);
(void) zap_lookup(mos, objid, "allocating", sizeof (uint64_t),
1, &allocating);
mutex_exit(&spa->spa_props_lock);
}
return (allocating == 0);
}
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (spa->spa_vdev_removal == NULL)
return (ENOTACTIVE);
return (0);
}
/*
* Cancel a removal by freeing all entries from the partial mapping
* and marking the vdev as no longer being removing.
*/
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
objset_t *mos = spa->spa_meta_objset;
ASSERT3P(svr->svr_thread, ==, NULL);
spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_free(vd->vdev_obsolete_sm, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&svr->svr_new_segments[i]));
ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
vdev_indirect_mapping_max_offset(vim));
}
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
break;
- ASSERT0(range_tree_space(svr->svr_allocd_segs));
+ ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_lock);
/*
* Assert nothing in flight -- ms_*tree is empty.
*/
for (int i = 0; i < TXG_SIZE; i++)
- ASSERT0(range_tree_space(msp->ms_allocating[i]));
+ ASSERT0(zfs_range_tree_space(msp->ms_allocating[i]));
for (int i = 0; i < TXG_DEFER_SIZE; i++)
- ASSERT0(range_tree_space(msp->ms_defer[i]));
- ASSERT0(range_tree_space(msp->ms_freed));
+ ASSERT0(zfs_range_tree_space(msp->ms_defer[i]));
+ ASSERT0(zfs_range_tree_space(msp->ms_freed));
if (msp->ms_sm != NULL) {
mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
- range_tree_walk(msp->ms_unflushed_allocs,
- range_tree_add, svr->svr_allocd_segs);
- range_tree_walk(msp->ms_unflushed_frees,
- range_tree_remove, svr->svr_allocd_segs);
- range_tree_walk(msp->ms_freeing,
- range_tree_remove, svr->svr_allocd_segs);
+ zfs_range_tree_walk(msp->ms_unflushed_allocs,
+ zfs_range_tree_add, svr->svr_allocd_segs);
+ zfs_range_tree_walk(msp->ms_unflushed_frees,
+ zfs_range_tree_remove, svr->svr_allocd_segs);
+ zfs_range_tree_walk(msp->ms_freeing,
+ zfs_range_tree_remove, svr->svr_allocd_segs);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for it yet.
*/
uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
uint64_t sm_end = msp->ms_sm->sm_start +
msp->ms_sm->sm_size;
if (sm_end > syncd)
- range_tree_clear(svr->svr_allocd_segs,
+ zfs_range_tree_clear(svr->svr_allocd_segs,
syncd, sm_end - syncd);
mutex_exit(&svr->svr_lock);
}
mutex_exit(&msp->ms_lock);
mutex_enter(&svr->svr_lock);
- range_tree_vacate(svr->svr_allocd_segs,
+ zfs_range_tree_vacate(svr->svr_allocd_segs,
free_mapped_segment_cb, vd);
mutex_exit(&svr->svr_lock);
}
/*
* Note: this must happen after we invoke free_mapped_segment_cb,
* because it adds to the obsolete_segments.
*/
- range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
+ zfs_range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
ASSERT3U(vic->vic_mapping_object, ==,
vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = 0;
ASSERT3U(vic->vic_births_object, ==,
vdev_indirect_births_object(vd->vdev_indirect_births));
vdev_indirect_births_close(vd->vdev_indirect_births);
vd->vdev_indirect_births = NULL;
vdev_indirect_births_free(mos, vic->vic_births_object, tx);
vic->vic_births_object = 0;
/*
* We may have processed some frees from the removing vdev in this
* txg, thus increasing svr_bytes_done; discard that here to
* satisfy the assertions in spa_vdev_removal_destroy().
* Note that future txgs cannot have any bytes_done, because
* future txgs are only modified from open context, and we have
* already shut down the copying thread.
*/
svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
spa_finish_removal(spa, DSS_CANCELED, tx);
vd->vdev_removing = B_FALSE;
if (!vdev_prop_allocating_off(vd)) {
spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
vdev_activate(vd);
spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
}
vdev_config_dirty(vd);
zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx));
spa_history_log_internal(spa, "vdev remove canceled", tx,
"%s vdev %llu %s", spa_name(spa),
(u_longlong_t)vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
}
static int
spa_vdev_remove_cancel_impl(spa_t *spa)
{
int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
spa_vdev_remove_cancel_sync, NULL, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
return (error);
}
int
spa_vdev_remove_cancel(spa_t *spa)
{
spa_vdev_remove_suspend(spa);
if (spa->spa_vdev_removal == NULL)
return (ENOTACTIVE);
return (spa_vdev_remove_cancel_impl(spa));
}
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (svr == NULL)
return;
/*
* This check is necessary so that we do not dirty the
* DIRECTORY_OBJECT via spa_sync_removing_state() when there
* is nothing to do. Dirtying it every time would prevent us
* from syncing-to-convergence.
*/
if (svr->svr_bytes_done[txgoff] == 0)
return;
/*
* Update progress accounting.
*/
spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
svr->svr_bytes_done[txgoff] = 0;
spa_sync_removing_state(spa, tx);
}
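svr_sync() indexes per-txg state with txg & TXG_MASK, the same ring-buffer idiom used throughout this file for svr_bytes_done and svr_max_offset_to_sync. A small standalone illustration (TXG_SIZE is 4 in OpenZFS, so the mask folds successive txgs onto four reusable slots):
#include <stdio.h>
#include <stdint.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

int
main(void)
{
	uint64_t bytes_done[TXG_SIZE] = { 0 };

	/* Successive txgs reuse the same small array of per-txg slots. */
	for (uint64_t txg = 100; txg < 108; txg++)
		bytes_done[txg & TXG_MASK] += 512;
	for (int i = 0; i < TXG_SIZE; i++)
		printf("slot %d: %llu bytes\n", i,
		    (unsigned long long)bytes_done[i]);
	return (0);
}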
static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
uint64_t id = vd->vdev_id;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
vdev_free(vd);
vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
vdev_add_child(rvd, vd);
vdev_config_dirty(rvd);
/*
* Reassess the health of our root vdev.
*/
vdev_reopen(rvd);
}
/*
* Remove a log device. The config lock is held for the specified TXG.
*/
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
int error = 0;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3P(vd->vdev_log_mg, ==, NULL);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Stop allocating from this vdev.
*/
metaslab_group_passivate(mg);
/*
* Wait for the youngest allocations and frees to sync,
* and then wait for the deferral of those frees to finish.
*/
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
/*
* Cancel any initialize or TRIM which was in progress.
*/
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
vdev_autotrim_stop_wait(vd);
/*
* Evacuate the device. We don't hold the config lock as
* writer since we need to do I/O but we do keep the
* spa_namespace_lock held. Once this completes the device
* should no longer have any blocks allocated on it.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (vd->vdev_stat.vs_alloc != 0)
error = spa_reset_logs(spa);
*txg = spa_vdev_config_enter(spa);
if (error != 0) {
metaslab_group_activate(mg);
ASSERT3P(vd->vdev_log_mg, ==, NULL);
return (error);
}
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* The evacuation succeeded. Remove any remaining MOS metadata
* associated with this vdev, and wait for these changes to sync.
*/
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
/*
* When the log space map feature is enabled we look at
* the vdev's top_zap to find the on-disk flush data of
* the metaslab we just flushed. Thus, while removing a
* log vdev we make sure to call vdev_metaslab_fini()
* first, which removes all metaslabs of this vdev from
* spa_metaslabs_by_flushed before vdev_remove_empty()
* destroys the top_zap of this log vdev.
*
* This avoids the scenario where we flush a metaslab
* from the log vdev being removed that doesn't have a
* top_zap and end up failing to lookup its on-disk flush
* data.
*
* We don't call metaslab_group_destroy() right away
* though (it will be called in vdev_free() later) as
* during metaslab_sync() of metaslabs from other vdevs
* we may touch the metaslab group of this vdev through
* metaslab_class_histogram_verify()
*/
vdev_metaslab_fini(vd);
spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
*txg = spa_vdev_config_enter(spa);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/* The top ZAP should have been destroyed by vdev_remove_empty. */
ASSERT0(vd->vdev_top_zap);
/* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
ASSERT0(vd->vdev_leaf_zap);
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
if (list_link_active(&vd->vdev_state_dirty_node))
vdev_state_clean(vd);
if (list_link_active(&vd->vdev_config_dirty_node))
vdev_config_clean(vd);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Clean up the vdev namespace.
*/
vdev_remove_make_hole_and_free(vd);
if (ev != NULL)
spa_event_post(ev);
return (0);
}
static int
spa_vdev_remove_top_check(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
if (vd != vd->vdev_top)
return (SET_ERROR(ENOTSUP));
if (!vdev_is_concrete(vd))
return (SET_ERROR(ENOTSUP));
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
return (SET_ERROR(ENOTSUP));
/*
* This device is already being removed
*/
if (vd->vdev_removing)
return (SET_ERROR(EALREADY));
metaslab_class_t *mc = vd->vdev_mg->mg_class;
metaslab_class_t *normal = spa_normal_class(spa);
if (mc != normal) {
/*
* Space allocated from the special (or dedup) class is
* included in the DMU's space usage, but it's not included
* in spa_dspace (or dsl_pool_adjustedsize()). Therefore
* there is always at least as much free space in the normal
* class, as is allocated from the special (and dedup) class.
* As a backup check, we will return ENOSPC if this is
* violated. See also spa_update_dspace().
*/
uint64_t available = metaslab_class_get_space(normal) -
metaslab_class_get_alloc(normal);
ASSERT3U(available, >=, vd->vdev_stat.vs_alloc);
if (available < vd->vdev_stat.vs_alloc)
return (SET_ERROR(ENOSPC));
} else if (!vd->vdev_noalloc) {
/* available space in the pool's normal class */
uint64_t available = dsl_dir_space_available(
spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
if (available < vd->vdev_stat.vs_dspace)
return (SET_ERROR(ENOSPC));
}
/*
* There cannot be a removal in progress.
*/
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(EBUSY));
/*
* The device must have all its data.
*/
if (!vdev_dtl_empty(vd, DTL_MISSING) ||
!vdev_dtl_empty(vd, DTL_OUTAGE))
return (SET_ERROR(EBUSY));
/*
* The device must be healthy.
*/
if (!vdev_readable(vd))
return (SET_ERROR(EIO));
/*
* All vdevs in normal class must have the same ashift.
*/
if (spa->spa_max_ashift != spa->spa_min_ashift) {
return (SET_ERROR(EINVAL));
}
/*
* A removed special/dedup vdev must have the same ashift as the normal class.
*/
ASSERT(!vd->vdev_islog);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
vd->vdev_ashift != spa->spa_max_ashift) {
return (SET_ERROR(EINVAL));
}
/*
* All vdevs in normal class must have the same ashift
* and not be raidz or draid.
*/
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t id = 0; id < rvd->vdev_children; id++) {
vdev_t *cvd = rvd->vdev_child[id];
/*
* A removed special/dedup vdev must have the same ashift
* across all vdevs in its class.
*/
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
cvd->vdev_alloc_bias == vd->vdev_alloc_bias &&
cvd->vdev_ashift != vd->vdev_ashift) {
return (SET_ERROR(EINVAL));
}
if (cvd->vdev_ashift != 0 &&
cvd->vdev_alloc_bias == VDEV_BIAS_NONE)
ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
if (!vdev_is_concrete(cvd))
continue;
if (vdev_get_nparity(cvd) != 0)
return (SET_ERROR(EINVAL));
/*
* Need the mirror to be a mirror of leaf vdevs only.
*/
if (cvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < cvd->vdev_children; cid++) {
if (!cvd->vdev_child[cid]->vdev_ops->
vdev_op_leaf)
return (SET_ERROR(EINVAL));
}
}
}
return (0);
}
/*
* Initiate removal of a top-level vdev, reducing the total space in the pool.
* The config lock is held for the specified TXG. Once initiated,
* evacuation of all allocated space (copying it to other vdevs) happens
* in the background (see spa_vdev_remove_thread()), and can be canceled
* (see spa_vdev_remove_cancel()). If successful, the vdev will
* be transformed to an indirect vdev (see spa_vdev_remove_complete()).
*/
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
spa_t *spa = vd->vdev_spa;
boolean_t set_noalloc = B_FALSE;
int error;
/*
* Check for errors up-front, so that we don't waste time
* passivating the metaslab group and clearing the ZIL if there
* are errors.
*/
error = spa_vdev_remove_top_check(vd);
/*
* Stop allocating from this vdev. Note that we must check
* that this is not the only device in the pool before
* passivating, otherwise we will not be able to make
* progress because we can't allocate from any vdevs.
* The above check for sufficient free space serves this
* purpose.
*/
if (error == 0 && !vd->vdev_noalloc) {
set_noalloc = B_TRUE;
error = vdev_passivate(vd, txg);
}
if (error != 0)
return (error);
/*
* We stop any initializing and TRIM that is currently in progress
* but leave the state as "active". This will allow the process to
* resume if the removal is canceled sometime later.
*/
spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_wait(vd);
*txg = spa_vdev_config_enter(spa);
/*
* Things might have changed while the config lock was dropped
* (e.g. space usage). Check for errors again.
*/
error = spa_vdev_remove_top_check(vd);
if (error != 0) {
if (set_noalloc)
vdev_activate(vd);
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
return (error);
}
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
dsl_sync_task_nowait(spa->spa_dsl_pool,
vdev_remove_initiate_sync, (void *)(uintptr_t)vd->vdev_id, tx);
dmu_tx_commit(tx);
return (0);
}
/*
* Remove a device from the pool.
*
* Removing a device from the vdev namespace requires several steps
* and can take a significant amount of time. As a result we use
* the spa_vdev_config_[enter/exit] functions which allow us to
* grab and release the spa_config_lock while still holding the namespace
* lock. During each step the configuration is synced out.
*/
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
vdev_t *vd;
nvlist_t **spares, **l2cache, *nv;
uint64_t txg = 0;
uint_t nspares, nl2cache;
int error = 0, error_log;
boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
sysevent_t *ev = NULL;
const char *vd_type = NULL;
char *vd_path = NULL;
ASSERT(spa_writeable(spa));
if (!locked)
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
if (!locked)
return (spa_vdev_exit(spa, NULL, txg, error));
return (error);
}
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (spa->spa_spares.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
(nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
/*
* Only remove the hot spare if it's not currently in use
* in this pool.
*/
if (vd == NULL || unspare) {
const char *type;
boolean_t draid_spare = B_FALSE;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type)
== 0 && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
draid_spare = B_TRUE;
if (vd == NULL && draid_spare) {
error = SET_ERROR(ENOTSUP);
} else {
if (vd == NULL)
vd = spa_lookup_by_guid(spa,
guid, B_TRUE);
ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_AUX);
vd_type = VDEV_TYPE_SPARE;
vd_path = spa_strdup(fnvlist_lookup_string(
nv, ZPOOL_CONFIG_PATH));
spa_vdev_remove_aux(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares, nv);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
} else {
error = SET_ERROR(EBUSY);
}
} else if (spa->spa_l2cache.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
(nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
vd_type = VDEV_TYPE_L2CACHE;
vd_path = spa_strdup(fnvlist_lookup_string(
nv, ZPOOL_CONFIG_PATH));
/*
* Cache devices can always be removed.
*/
vd = spa_lookup_by_guid(spa, guid, B_TRUE);
/*
* Stop trimming the cache device. We need to release the
* config lock to allow the syncing of TRIM transactions
* without releasing the spa_namespace_lock. The same
* strategy is employed in spa_vdev_remove_top().
*/
spa_vdev_config_exit(spa, NULL,
txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
txg = spa_vdev_config_enter(spa);
ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
} else if (vd != NULL && vd->vdev_islog) {
ASSERT(!locked);
vd_type = VDEV_TYPE_LOG;
vd_path = spa_strdup((vd->vdev_path != NULL) ?
vd->vdev_path : "-");
error = spa_vdev_remove_log(vd, &txg);
} else if (vd != NULL) {
ASSERT(!locked);
error = spa_vdev_remove_top(vd, &txg);
} else {
/*
* There is no vdev of any kind with the specified guid.
*/
error = SET_ERROR(ENOENT);
}
error_log = error;
if (!locked)
error = spa_vdev_exit(spa, NULL, txg, error);
/*
* Logging must be done outside the spa config lock. Otherwise,
* this code path could end up holding the spa config lock while
* waiting for a txg_sync so it can write to the internal log.
* Doing that would prevent the txg sync from actually happening,
* causing a deadlock.
*/
if (error_log == 0 && vd_type != NULL && vd_path != NULL) {
spa_history_log_internal(spa, "vdev remove", NULL,
"%s vdev (%s) %s", spa_name(spa), vd_type, vd_path);
}
if (vd_path != NULL)
spa_strfree(vd_path);
if (ev != NULL)
spa_event_post(ev);
return (error);
}
int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
prs->prs_state = spa->spa_removing_phys.sr_state;
if (prs->prs_state == DSS_NONE)
return (SET_ERROR(ENOENT));
prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
prs->prs_copied = spa->spa_removing_phys.sr_copied;
prs->prs_mapping_memory = 0;
uint64_t indirect_vdev_id =
spa->spa_removing_phys.sr_prev_indirect_vdev;
while (indirect_vdev_id != -1) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
indirect_vdev_id = vic->vic_prev_indirect_vdev;
}
return (0);
}
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW,
"Ignore hard IO errors when removing device");
ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, UINT, ZMOD_RW,
"Largest contiguous segment to allocate when removing device");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, UINT, ZMOD_RW,
"Largest span of free chunks a remap segment can span");
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, UINT, ZMOD_RW,
"Pause device removal after this many bytes are copied "
"(debug use only - causes removal to hang)");
EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);
EXPORT_SYMBOL(spa_remove_init);
EXPORT_SYMBOL(spa_restart_removal);
EXPORT_SYMBOL(spa_vdev_removal_destroy);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_remove_cancel);
EXPORT_SYMBOL(spa_vdev_remove_suspend);
EXPORT_SYMBOL(svr_sync);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_trim.c b/sys/contrib/openzfs/module/zfs/vdev_trim.c
index 9cf10332e8bf..1ca0b23c0ee4 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_trim.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_trim.c
@@ -1,1788 +1,1791 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2024 by Delphix. All rights reserved.
* Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
* Copyright (c) 2021 Hewlett Packard Enterprise Development LP
* Copyright 2023 RackTop Systems, Inc.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc_impl.h>
/*
* TRIM is a feature which is used to notify an SSD that some previously
* written space is no longer allocated by the pool. This is useful because
* writes to an SSD must be performed to blocks which have first been erased.
* Ensuring the SSD always has a supply of erased blocks for new writes
* helps prevent performance from deteriorating.
*
* There are two supported TRIM methods: manual and automatic.
*
* Manual TRIM:
*
* A manual TRIM is initiated by running the 'zpool trim' command. A single
* 'vdev_trim' thread is created for each leaf vdev, and it is responsible for
* managing that vdev TRIM process. This involves iterating over all the
* metaslabs, calculating the unallocated space ranges, and then issuing the
* required TRIM I/Os.
*
* While a metaslab is being actively trimmed it is not eligible to perform
* new allocations. After traversing all of the metaslabs the thread is
* terminated. Finally, both the requested options and current progress of
* the TRIM are regularly written to the pool. This allows the TRIM to be
* suspended and resumed as needed.
*
* Automatic TRIM:
*
* An automatic TRIM is enabled by setting the 'autotrim' pool property
* to 'on'. When enabled, a `vdev_autotrim' thread is created for each
* top-level (not leaf) vdev in the pool. These threads perform the same
* core TRIM process as a manual TRIM, but with a few key differences.
*
* 1) Automatic TRIM happens continuously in the background and operates
* solely on recently freed blocks (ms_trim not ms_allocatable).
*
* 2) Each thread is associated with a top-level (not leaf) vdev. This has
* the benefit of simplifying the threading model, making it easier
* to coordinate administrative commands, and ensuring only a single
* metaslab is disabled at a time. Unlike manual TRIM, this means each
* 'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
* children.
*
* 3) There is no automatic TRIM progress information stored on disk, nor
* is it reported by 'zpool status'.
*
* While the automatic TRIM process is highly effective it is more likely
* than a manual TRIM to encounter tiny ranges. Ranges less than or equal to
* 'zfs_trim_extent_bytes_min' (32k) are considered too small to efficiently
* TRIM and are skipped. This means small amounts of freed space may not
* be automatically trimmed.
*
* Furthermore, devices with attached hot spares and devices being actively
* replaced are skipped. This is done to avoid adding additional stress to
* a potentially unhealthy device and to minimize the required rebuild time.
*
* For this reason it may be beneficial to occasionally manually TRIM a pool
* even when automatic TRIM is enabled.
*/
/*
* Maximum size of a TRIM I/O; ranges will be chunked into 128 MiB lengths.
*/
static unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024;
/*
* Minimum size of a TRIM I/O; extents smaller than 32 KiB will be skipped.
*/
static unsigned int zfs_trim_extent_bytes_min = 32 * 1024;
/*
* Skip uninitialized metaslabs during the TRIM process. This option is
* useful for pools constructed from large thinly-provisioned devices where
* TRIM operations are slow. As a pool ages, an increasing fraction of
* the pool's metaslabs will be initialized, progressively degrading the
* usefulness of this option. This setting is stored when starting a
* manual TRIM and will persist for the duration of the requested TRIM.
*/
unsigned int zfs_trim_metaslab_skip = 0;
/*
* Maximum number of queued TRIM I/Os per leaf vdev. The number of
* concurrent TRIM I/Os issued to the device is controlled by the
* zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options.
*/
static unsigned int zfs_trim_queue_limit = 10;
/*
* The minimum number of transaction groups between automatic trims of a
* metaslab. This setting represents a trade-off between issuing more
* efficient TRIM operations, by allowing them to be aggregated longer,
* and issuing them promptly so the trimmed space is available. Note
* that this value is a minimum; metaslabs can be trimmed less frequently
* when there are a large number of ranges which need to be trimmed.
*
* Increasing this value will allow frees to be aggregated for a longer
* time. This can result in larger TRIM operations, and increased memory
* usage in order to track the ranges to be trimmed. Decreasing this value
* has the opposite effect. The default value of 32 was determined through
* testing to be a reasonable compromise.
*/
static unsigned int zfs_trim_txg_batch = 32;
/*
* The trim_args are a control structure which describe how a leaf vdev
* should be trimmed. The core elements are the vdev, the metaslab being
* trimmed and a range tree containing the extents to TRIM. All provided
* ranges must be within the metaslab.
*/
typedef struct trim_args {
/*
* These fields are set by the caller of vdev_trim_ranges().
*/
vdev_t *trim_vdev; /* Leaf vdev to TRIM */
metaslab_t *trim_msp; /* Disabled metaslab */
- range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
+ zfs_range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
trim_type_t trim_type; /* Manual or auto TRIM */
uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */
uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */
enum trim_flag trim_flags; /* TRIM flags (secure) */
/*
* These fields are updated by vdev_trim_ranges().
*/
hrtime_t trim_start_time; /* Start time */
uint64_t trim_bytes_done; /* Bytes trimmed */
} trim_args_t;
/*
* Determines whether a vdev_trim_thread() should be stopped.
*/
static boolean_t
vdev_trim_should_stop(vdev_t *vd)
{
return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing ||
vd->vdev_top->vdev_rz_expanding);
}
/*
* Determines whether a vdev_autotrim_thread() should be stopped.
*/
static boolean_t
vdev_autotrim_should_stop(vdev_t *tvd)
{
return (tvd->vdev_autotrim_exit_wanted ||
!vdev_writeable(tvd) || tvd->vdev_removing ||
tvd->vdev_rz_expanding ||
spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
}
/*
* Wait for a given number of kicks; return true if the wait is aborted due to
* vdev_autotrim_exit_wanted.
*/
static boolean_t
vdev_autotrim_wait_kick(vdev_t *vd, int num_of_kick)
{
mutex_enter(&vd->vdev_autotrim_lock);
for (int i = 0; i < num_of_kick; i++) {
if (vd->vdev_autotrim_exit_wanted)
break;
cv_wait_idle(&vd->vdev_autotrim_kick_cv,
&vd->vdev_autotrim_lock);
}
boolean_t exit_wanted = vd->vdev_autotrim_exit_wanted;
mutex_exit(&vd->vdev_autotrim_lock);
return (exit_wanted);
}
/*
* The sync task for updating the on-disk state of a manual TRIM. This
* is scheduled by vdev_trim_change_state().
*/
static void
vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
{
/*
* We pass in the guid instead of the vdev_t since the vdev may
* have been freed prior to the sync task being processed. This
* happens when a vdev is detached as we call spa_config_vdev_exit(),
* stop the trimming thread, schedule the sync task, and free
* the vdev. Later when the scheduled sync task is invoked, it would
* find that the vdev has been freed.
*/
uint64_t guid = *(uint64_t *)arg;
uint64_t txg = dmu_tx_get_txg(tx);
kmem_free(arg, sizeof (uint64_t));
vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing ||
!vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
return;
uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
vd->vdev_trim_offset[txg & TXG_MASK] = 0;
VERIFY3U(vd->vdev_leaf_zap, !=, 0);
objset_t *mos = vd->vdev_spa->spa_meta_objset;
if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) {
if (vd->vdev_trim_last_offset == UINT64_MAX)
last_offset = 0;
vd->vdev_trim_last_offset = last_offset;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
sizeof (last_offset), 1, &last_offset, tx));
}
if (vd->vdev_trim_action_time > 0) {
uint64_t val = (uint64_t)vd->vdev_trim_action_time;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_ACTION_TIME, sizeof (val),
1, &val, tx));
}
if (vd->vdev_trim_rate > 0) {
uint64_t rate = (uint64_t)vd->vdev_trim_rate;
if (rate == UINT64_MAX)
rate = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_RATE, sizeof (rate), 1, &rate, tx));
}
uint64_t partial = vd->vdev_trim_partial;
if (partial == UINT64_MAX)
partial = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
sizeof (partial), 1, &partial, tx));
uint64_t secure = vd->vdev_trim_secure;
if (secure == UINT64_MAX)
secure = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
sizeof (secure), 1, &secure, tx));
uint64_t trim_state = vd->vdev_trim_state;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
sizeof (trim_state), 1, &trim_state, tx));
}
/*
* Update the on-disk state of a manual TRIM. This is called to request
* that a TRIM be started/suspended/canceled, or to change one of the
* TRIM options (partial, secure, rate).
*/
static void
vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
uint64_t rate, boolean_t partial, boolean_t secure)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
spa_t *spa = vd->vdev_spa;
if (new_state == vd->vdev_trim_state)
return;
/*
* Copy the vd's guid, this will be freed by the sync task.
*/
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/*
* If we're suspending, then preserve the original start time.
*/
if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) {
vd->vdev_trim_action_time = gethrestime_sec();
}
/*
* If we're activating, then preserve the requested rate and trim
* method. Setting the last offset and rate to UINT64_MAX is used
* as a sentinel to indicate they should be reset to default values.
*/
if (new_state == VDEV_TRIM_ACTIVE) {
if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE ||
vd->vdev_trim_state == VDEV_TRIM_CANCELED) {
vd->vdev_trim_last_offset = UINT64_MAX;
vd->vdev_trim_rate = UINT64_MAX;
vd->vdev_trim_partial = UINT64_MAX;
vd->vdev_trim_secure = UINT64_MAX;
}
if (rate != 0)
vd->vdev_trim_rate = rate;
if (partial != 0)
vd->vdev_trim_partial = partial;
if (secure != 0)
vd->vdev_trim_secure = secure;
}
vdev_trim_state_t old_state = vd->vdev_trim_state;
boolean_t resumed = (old_state == VDEV_TRIM_SUSPENDED);
vd->vdev_trim_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
guid, tx);
switch (new_state) {
case VDEV_TRIM_ACTIVE:
spa_event_notify(spa, vd, NULL,
resumed ? ESC_ZFS_TRIM_RESUME : ESC_ZFS_TRIM_START);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s activated", vd->vdev_path);
break;
case VDEV_TRIM_SUSPENDED:
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_SUSPEND);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s suspended", vd->vdev_path);
break;
case VDEV_TRIM_CANCELED:
if (old_state == VDEV_TRIM_ACTIVE ||
old_state == VDEV_TRIM_SUSPENDED) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_CANCEL);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s canceled", vd->vdev_path);
}
break;
case VDEV_TRIM_COMPLETE:
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_FINISH);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s complete", vd->vdev_path);
break;
default:
panic("invalid state %llu", (unsigned long long)new_state);
}
dmu_tx_commit(tx);
if (new_state != VDEV_TRIM_ACTIVE)
spa_notify_waiters(spa);
}
/*
* The zio_done_func_t done callback for each manual TRIM issued. It is
* responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
* and limiting the number of in flight TRIM I/Os.
*/
static void
vdev_trim_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
/*
* The I/O failed because the vdev was unavailable; roll the
* last offset back. (This works because spa_sync waits on
* spa_txg_zio before it runs sync tasks.)
*/
uint64_t *offset =
&vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
*offset = MIN(*offset, zio->io_offset);
} else {
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
1, zio->io_orig_size, 0, 0, 0, 0);
}
vd->vdev_trim_bytes_done += zio->io_orig_size;
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* The zio_done_func_t done callback for each automatic TRIM issued. It
* is responsible for updating the TRIM stats and limiting the number of
* in flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
* never reissued on failure.
*/
static void
vdev_autotrim_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
1, zio->io_orig_size, 0, 0, 0, 0);
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* The zio_done_func_t done callback for each TRIM issued via
* vdev_trim_simple(). It is responsible for updating the TRIM stats and
* limiting the number of in flight TRIM I/Os. Simple TRIM I/Os are best
* effort and are never reissued on failure.
*/
static void
vdev_trim_simple_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
1, zio->io_orig_size, 0, 0, 0, 0);
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* Returns the average trim rate in bytes/sec for the ta->trim_vdev.
*/
static uint64_t
vdev_trim_calculate_rate(trim_args_t *ta)
{
return (ta->trim_bytes_done * 1000 /
(NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1));
}
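The rate computation above is simply bytes trimmed divided by elapsed time, with a +1 in the denominator so the division is safe immediately after the TRIM starts. A standalone sketch of the same arithmetic with hypothetical inputs:
#include <stdio.h>
#include <stdint.h>

/* Average rate in bytes/sec given bytes trimmed and elapsed milliseconds. */
static uint64_t
trim_rate(uint64_t bytes_done, uint64_t elapsed_ms)
{
	return (bytes_done * 1000 / (elapsed_ms + 1));
}

int
main(void)
{
	printf("%llu bytes/sec\n",
	    (unsigned long long)trim_rate(64ULL << 20, 250));
	return (0);
}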
/*
* Issues a physical TRIM and takes care of rate limiting (bytes/sec)
* and number of concurrent TRIM I/Os.
*/
static int
vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
{
vdev_t *vd = ta->trim_vdev;
spa_t *spa = vd->vdev_spa;
void *cb;
mutex_enter(&vd->vdev_trim_io_lock);
/*
* Limit manual TRIM I/Os to the requested rate. This does not
* apply to automatic TRIM since no per-vdev rate can be specified.
*/
if (ta->trim_type == TRIM_TYPE_MANUAL) {
while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) &&
vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) {
cv_timedwait_idle(&vd->vdev_trim_io_cv,
&vd->vdev_trim_io_lock, ddi_get_lbolt() +
MSEC_TO_TICK(10));
}
}
ta->trim_bytes_done += size;
/* Limit in flight trimming I/Os */
while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] +
vd->vdev_trim_inflight[2] >= zfs_trim_queue_limit) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
vd->vdev_trim_inflight[ta->trim_type]++;
mutex_exit(&vd->vdev_trim_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
mutex_enter(&vd->vdev_trim_lock);
if (ta->trim_type == TRIM_TYPE_MANUAL &&
vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/* This is the first write of this txg. */
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_trim_zap_update_sync, guid, tx);
}
/*
* We know the vdev_t will still be around since all consumers of
* vdev_free must stop the trimming first.
*/
if ((ta->trim_type == TRIM_TYPE_MANUAL &&
vdev_trim_should_stop(vd)) ||
(ta->trim_type == TRIM_TYPE_AUTO &&
vdev_autotrim_should_stop(vd->vdev_top))) {
mutex_enter(&vd->vdev_trim_io_lock);
vd->vdev_trim_inflight[ta->trim_type]--;
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
mutex_exit(&vd->vdev_trim_lock);
dmu_tx_commit(tx);
return (SET_ERROR(EINTR));
}
mutex_exit(&vd->vdev_trim_lock);
if (ta->trim_type == TRIM_TYPE_MANUAL)
vd->vdev_trim_offset[txg & TXG_MASK] = start + size;
if (ta->trim_type == TRIM_TYPE_MANUAL) {
cb = vdev_trim_cb;
} else if (ta->trim_type == TRIM_TYPE_AUTO) {
cb = vdev_autotrim_cb;
} else {
cb = vdev_trim_simple_cb;
}
zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
start, size, cb, NULL, ZIO_PRIORITY_TRIM, ZIO_FLAG_CANFAIL,
ta->trim_flags));
/* vdev_trim_cb and vdev_autotrim_cb release SCL_STATE_ALL */
dmu_tx_commit(tx);
return (0);
}
/*
* Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
* Additional parameters describing how the TRIM should be performed must
* be set in the trim_args structure. See the trim_args definition for
* additional information.
*/
static int
vdev_trim_ranges(trim_args_t *ta)
{
vdev_t *vd = ta->trim_vdev;
zfs_btree_t *t = &ta->trim_tree->rt_root;
zfs_btree_index_t idx;
uint64_t extent_bytes_max = ta->trim_extent_bytes_max;
uint64_t extent_bytes_min = ta->trim_extent_bytes_min;
spa_t *spa = vd->vdev_spa;
int error = 0;
ta->trim_start_time = gethrtime();
ta->trim_bytes_done = 0;
- for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
+ for (zfs_range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
rs = zfs_btree_next(t, &idx, &idx)) {
- uint64_t size = rs_get_end(rs, ta->trim_tree) - rs_get_start(rs,
- ta->trim_tree);
+ uint64_t size = zfs_rs_get_end(rs, ta->trim_tree) -
+ zfs_rs_get_start(rs, ta->trim_tree);
if (extent_bytes_min && size < extent_bytes_min) {
spa_iostats_trim_add(spa, ta->trim_type,
0, 0, 1, size, 0, 0);
continue;
}
/* Split range into legally-sized physical chunks */
uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1;
for (uint64_t w = 0; w < writes_required; w++) {
error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
- rs_get_start(rs, ta->trim_tree) +
+ zfs_rs_get_start(rs, ta->trim_tree) +
(w * extent_bytes_max), MIN(size -
(w * extent_bytes_max), extent_bytes_max));
if (error != 0) {
goto done;
}
}
}
done:
/*
* Make sure all TRIMs for this metaslab have completed before
* returning. TRIM zios have a lower priority than regular or syncing
* zios, so all TRIM zios for this metaslab must complete before the
* metaslab is re-enabled. Otherwise it's possible write zios to
* this metaslab could cut ahead of still queued TRIM zios for this
* metaslab causing corruption if the ranges overlap.
*/
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[0] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
return (error);
}
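/*
* A minimal sketch, not part of the TRIM code, only to illustrate the
* chunking math used by vdev_trim_ranges() above; the helper name is
* hypothetical. For example, a 10 MiB extent with a 4 MiB
* zfs_trim_extent_bytes_max is issued as three TRIM commands of
* 4 MiB, 4 MiB and 2 MiB.
*/
static inline uint64_t
vdev_trim_writes_required_sketch(uint64_t size, uint64_t extent_bytes_max)
{
/* Round-up division, matching the inner loop of vdev_trim_ranges(). */
return (((size - 1) / extent_bytes_max) + 1);
}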
static void
-vdev_trim_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
+vdev_trim_xlate_last_rs_end(void *arg, zfs_range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
if (physical_rs->rs_end > *last_rs_end)
*last_rs_end = physical_rs->rs_end;
}
static void
-vdev_trim_xlate_progress(void *arg, range_seg64_t *physical_rs)
+vdev_trim_xlate_progress(void *arg, zfs_range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
vd->vdev_trim_bytes_est += size;
if (vd->vdev_trim_last_offset >= physical_rs->rs_end) {
vd->vdev_trim_bytes_done += size;
} else if (vd->vdev_trim_last_offset > physical_rs->rs_start &&
vd->vdev_trim_last_offset <= physical_rs->rs_end) {
vd->vdev_trim_bytes_done +=
vd->vdev_trim_last_offset - physical_rs->rs_start;
}
}
/*
* Calculates the completion percentage of a manual TRIM.
*/
static void
vdev_trim_calculate_progress(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
vd->vdev_trim_bytes_est = 0;
vd->vdev_trim_bytes_done = 0;
for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
mutex_enter(&msp->ms_lock);
uint64_t ms_free = (msp->ms_size -
metaslab_allocated_space(msp)) /
vdev_get_ndisks(vd->vdev_top);
/*
* Convert the metaslab range to a physical range
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
- range_seg64_t logical_rs, physical_rs, remain_rs;
+ zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
/* Metaslab space after this offset has not been trimmed. */
vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
if (vd->vdev_trim_last_offset <= physical_rs.rs_start) {
vd->vdev_trim_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/* Metaslab space before this offset has been trimmed */
uint64_t last_rs_end = physical_rs.rs_end;
if (!vdev_xlate_is_empty(&remain_rs)) {
vdev_xlate_walk(vd, &remain_rs,
vdev_trim_xlate_last_rs_end, &last_rs_end);
}
if (vd->vdev_trim_last_offset > last_rs_end) {
vd->vdev_trim_bytes_done += ms_free;
vd->vdev_trim_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If we get here, we're in the middle of trimming this
* metaslab. Load it and walk the free tree for more
* accurate progress estimation.
*/
VERIFY0(metaslab_load(msp));
- range_tree_t *rt = msp->ms_allocatable;
+ zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t idx;
- for (range_seg_t *rs = zfs_btree_first(bt, &idx);
+ for (zfs_range_seg_t *rs = zfs_btree_first(bt, &idx);
rs != NULL; rs = zfs_btree_next(bt, &idx, &idx)) {
- logical_rs.rs_start = rs_get_start(rs, rt);
- logical_rs.rs_end = rs_get_end(rs, rt);
+ logical_rs.rs_start = zfs_rs_get_start(rs, rt);
+ logical_rs.rs_end = zfs_rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs,
vdev_trim_xlate_progress, vd);
}
mutex_exit(&msp->ms_lock);
}
}
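/*
* Worked example (offsets are illustrative): with vdev_trim_last_offset at
* 10 GiB, a metaslab whose physical range starts at 12 GiB only grows
* vdev_trim_bytes_est, one whose range ends at 9 GiB counts fully toward
* both vdev_trim_bytes_done and vdev_trim_bytes_est, and one spanning
* 9.5-10.5 GiB is loaded so its free segments can be split around the
* current offset by vdev_trim_xlate_progress().
*/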
/*
* Load from disk the vdev's manual TRIM information. This includes the
* state, progress, and options provided when initiating the manual TRIM.
*/
static int
vdev_trim_load(vdev_t *vd)
{
int err = 0;
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE ||
vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
sizeof (vd->vdev_trim_last_offset), 1,
&vd->vdev_trim_last_offset);
if (err == ENOENT) {
vd->vdev_trim_last_offset = 0;
err = 0;
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE,
sizeof (vd->vdev_trim_rate), 1,
&vd->vdev_trim_rate);
if (err == ENOENT) {
vd->vdev_trim_rate = 0;
err = 0;
}
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
sizeof (vd->vdev_trim_partial), 1,
&vd->vdev_trim_partial);
if (err == ENOENT) {
vd->vdev_trim_partial = 0;
err = 0;
}
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
sizeof (vd->vdev_trim_secure), 1,
&vd->vdev_trim_secure);
if (err == ENOENT) {
vd->vdev_trim_secure = 0;
err = 0;
}
}
}
vdev_trim_calculate_progress(vd);
return (err);
}
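/*
* A minimal sketch of the lookup-or-default pattern used by vdev_trim_load()
* above; the helper name is hypothetical. A missing ZAP entry (ENOENT) is
* treated as "use the default of zero" rather than as an error.
*/
static int
vdev_trim_zap_lookup_or_zero_sketch(vdev_t *vd, const char *name,
uint64_t *valp)
{
int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, name, sizeof (*valp), 1, valp);
if (err == ENOENT) {
*valp = 0;
err = 0;
}
return (err);
}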
static void
-vdev_trim_xlate_range_add(void *arg, range_seg64_t *physical_rs)
+vdev_trim_xlate_range_add(void *arg, zfs_range_seg64_t *physical_rs)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
/*
* Only a manual trim will be traversing the vdev sequentially.
* For an auto trim all valid ranges should be added.
*/
if (ta->trim_type == TRIM_TYPE_MANUAL) {
/* Only add segments that we have not visited yet */
if (physical_rs->rs_end <= vd->vdev_trim_last_offset)
return;
/* Pick up where we left off mid-range. */
if (vd->vdev_trim_last_offset > physical_rs->rs_start) {
ASSERT3U(physical_rs->rs_end, >,
vd->vdev_trim_last_offset);
physical_rs->rs_start = vd->vdev_trim_last_offset;
}
}
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
- range_tree_add(ta->trim_tree, physical_rs->rs_start,
+ zfs_range_tree_add(ta->trim_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start);
}
/*
* Convert the logical range into physical ranges and add them to the
* range tree passed in the trim_args_t.
*/
static void
vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
- range_seg64_t logical_rs;
+ zfs_range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;
/*
* Every range to be trimmed must be part of ms_allocatable.
* When ZFS_DEBUG_TRIM is set load the metaslab to verify this
* is always the case.
*/
if (zfs_flags & ZFS_DEBUG_TRIM) {
metaslab_t *msp = ta->trim_msp;
VERIFY0(metaslab_load(msp));
VERIFY3B(msp->ms_loaded, ==, B_TRUE);
- VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
+ VERIFY(zfs_range_tree_contains(msp->ms_allocatable, start,
+ size));
}
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_xlate_walk(vd, &logical_rs, vdev_trim_xlate_range_add, arg);
}
/*
* Each manual TRIM thread is responsible for trimming the unallocated
* space for each leaf vdev. This is accomplished by sequentially iterating
* over its top-level metaslabs and issuing TRIM I/O for the space described
* by its ms_allocatable. While a metaslab is undergoing trimming it is
* not eligible for new allocations.
*/
static __attribute__((noreturn)) void
vdev_trim_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
trim_args_t ta;
int error = 0;
/*
* The VDEV_LEAF_ZAP_TRIM_* entries may have been updated by
* vdev_trim(). Wait for the updated values to be reflected
* in the zap in order to start with the requested settings.
*/
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_trim_last_offset = 0;
vd->vdev_trim_rate = 0;
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
VERIFY0(vdev_trim_load(vd));
ta.trim_vdev = vd;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
- ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_flags = 0;
/*
* When a secure TRIM has been requested, infer that the intent
* is that everything must be trimmed. Override the default
* minimum TRIM size to prevent ranges from being skipped.
*/
if (vd->vdev_trim_secure) {
ta.trim_flags |= ZIO_TRIM_SECURE;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
}
uint64_t ms_count = 0;
for (uint64_t i = 0; !vd->vdev_detached &&
i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
/*
* If we've expanded the top-level vdev or it's our
* first pass, calculate our progress.
*/
if (vd->vdev_top->vdev_ms_count != ms_count) {
vdev_trim_calculate_progress(vd);
ms_count = vd->vdev_top->vdev_ms_count;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
/*
* If a partial TRIM was requested skip metaslabs which have
* never been initialized and thus have never been written.
*/
if (msp->ms_sm == NULL && vd->vdev_trim_partial) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_calculate_progress(vd);
continue;
}
ta.trim_msp = msp;
- range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta);
- range_tree_vacate(msp->ms_trim, NULL, NULL);
+ zfs_range_tree_walk(msp->ms_allocatable, vdev_trim_range_add,
+ &ta);
+ zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock);
error = vdev_trim_ranges(&ta);
metaslab_enable(msp, B_TRUE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
- range_tree_vacate(ta.trim_tree, NULL, NULL);
+ zfs_range_tree_vacate(ta.trim_tree, NULL, NULL);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
- range_tree_destroy(ta.trim_tree);
+ zfs_range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted) {
if (vdev_writeable(vd)) {
vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
} else if (vd->vdev_faulted) {
vdev_trim_change_state(vd, VDEV_TRIM_CANCELED,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
}
ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0);
/*
* Drop the vdev_trim_lock while we sync out the txg since it's
* possible that a device might be trying to come online and must
* check to see if it needs to restart a trim. That thread will be
* holding the spa_config_lock which would prevent the txg_wait_synced
* from completing.
*/
mutex_exit(&vd->vdev_trim_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&vd->vdev_trim_lock);
vd->vdev_trim_thread = NULL;
cv_broadcast(&vd->vdev_trim_cv);
mutex_exit(&vd->vdev_trim_lock);
thread_exit();
}
/*
* Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock;
* the vdev_t must be a leaf and cannot already be manually trimming.
*/
void
vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
ASSERT(!vd->vdev_rz_expanding);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
vd->vdev_trim_thread = thread_create(NULL, 0,
vdev_trim_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
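/*
* A minimal caller sketch, only to illustrate the locking contract stated
* above; the wrapper name is hypothetical and it assumes the vdev already
* satisfies the other preconditions (leaf, concrete, not detached, and not
* already trimming).
*/
static void
vdev_trim_start_sketch(vdev_t *vd, uint64_t rate)
{
mutex_enter(&vd->vdev_trim_lock);
vdev_trim(vd, rate, B_FALSE, B_FALSE);
mutex_exit(&vd->vdev_trim_lock);
}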
/*
* Wait for the trimming thread to be terminated (canceled or stopped).
*/
static void
vdev_trim_stop_wait_impl(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
while (vd->vdev_trim_thread != NULL)
cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
vd->vdev_trim_exit_wanted = B_FALSE;
}
/*
* Wait for the listed vdev trim threads to cleanly exit.
*/
void
vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
{
(void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
while ((vd = list_remove_head(vd_list)) != NULL) {
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop_wait_impl(vd);
mutex_exit(&vd->vdev_trim_lock);
}
}
/*
* Stop trimming a device, with the resultant trimming state being tgt_state.
* For blocking behavior pass NULL for vd_list. Otherwise, when a list_t is
* provided the stopping vdev is inserted into the list. Callers are then
* required to call vdev_trim_stop_wait() to block for all the trim threads
* to exit. The caller must hold vdev_trim_lock and must not be writing to
* the spa config, as the trimming thread may try to enter the config as a
* reader before exiting.
*/
void
vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
{
ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
/*
* Allow cancel requests to proceed even if the trim thread has
* stopped.
*/
if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED)
return;
vdev_trim_change_state(vd, tgt_state, 0, 0, 0);
vd->vdev_trim_exit_wanted = B_TRUE;
if (vd_list == NULL) {
vdev_trim_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
vd->vdev_spa->spa_export_thread == curthread);
list_insert_tail(vd_list, vd);
}
}
/*
* Requests that all listed vdevs stop trimming.
*/
static void
vdev_trim_stop_all_impl(vdev_t *vd, vdev_trim_state_t tgt_state,
list_t *vd_list)
{
if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, tgt_state, vd_list);
mutex_exit(&vd->vdev_trim_lock);
return;
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state,
vd_list);
}
}
/*
* Convenience function to stop trimming of a vdev tree and set all trim
* thread pointers to NULL.
*/
void
vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
{
spa_t *spa = vd->vdev_spa;
list_t vd_list;
vdev_t *vd_l2cache;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_export_thread == curthread);
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
vdev_trim_stop_all_impl(vd, tgt_state, &vd_list);
/*
* Iterate over cache devices and request that trimming of the
* whole device stop in case we export the pool or remove the cache
* device prematurely.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd_l2cache = spa->spa_l2cache.sav_vdevs[i];
vdev_trim_stop_all_impl(vd_l2cache, tgt_state, &vd_list);
}
vdev_trim_stop_wait(spa, &vd_list);
if (vd->vdev_spa->spa_sync_on) {
/* Make sure that our state has been synced to disk */
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
}
list_destroy(&vd_list);
}
/*
* Conditionally restarts a manual TRIM given its on-disk state.
*/
void
vdev_trim_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
vd->vdev_spa->spa_load_thread == curthread);
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_leaf_zap != 0) {
mutex_enter(&vd->vdev_trim_lock);
uint64_t trim_state = VDEV_TRIM_NONE;
int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
sizeof (trim_state), 1, &trim_state);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_state = trim_state;
uint64_t timestamp = 0;
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME,
sizeof (timestamp), 1, &timestamp);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_action_time = timestamp;
if ((vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_trim_load(vd));
} else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
!vd->vdev_top->vdev_rz_expanding &&
vd->vdev_trim_thread == NULL) {
VERIFY0(vdev_trim_load(vd));
vdev_trim(vd, vd->vdev_trim_rate,
vd->vdev_trim_partial, vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_trim_restart(vd->vdev_child[i]);
}
}
/*
* Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
* every TRIM range is contained within ms_allocatable.
*/
static void
vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
metaslab_t *msp = ta->trim_msp;
VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY3U(msp->ms_disabled, >, 0);
- VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
+ VERIFY(zfs_range_tree_contains(msp->ms_allocatable, start, size));
}
/*
* Each automatic TRIM thread is responsible for managing the trimming of a
* top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
*
* N.B. This behavior is different from a manual TRIM where a thread
* is created for each leaf vdev, instead of each top-level vdev.
*/
static __attribute__((noreturn)) void
vdev_autotrim_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
int shift = 0;
mutex_enter(&vd->vdev_autotrim_lock);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(vd->vdev_autotrim_thread, !=, NULL);
mutex_exit(&vd->vdev_autotrim_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
while (!vdev_autotrim_should_stop(vd)) {
int txgs_per_trim = MAX(zfs_trim_txg_batch, 1);
uint64_t extent_bytes_max = zfs_trim_extent_bytes_max;
uint64_t extent_bytes_min = zfs_trim_extent_bytes_min;
/*
* All of the metaslabs are divided into groups of size
* num_metaslabs / zfs_trim_txg_batch. Each of these groups
* is composed of metaslabs which are spread evenly over the
* device.
*
* For example, when zfs_trim_txg_batch = 32 (default) then
* group 0 will contain metaslabs 0, 32, 64, ...;
* group 1 will contain metaslabs 1, 33, 65, ...;
* group 2 will contain metaslabs 2, 34, 66, ...; and so on.
*
* On each pass through the while() loop one of these groups
* is selected. This is accomplished by using a shift value
* to select the starting metaslab, then striding over the
* metaslabs using the zfs_trim_txg_batch size. This is
* done to accomplish two things.
*
* 1) By dividing the metaslabs into groups and making sure
* that each group takes a minimum of one txg to process,
* zfs_trim_txg_batch controls the minimum number of
* txgs which must occur before a metaslab is revisited.
*
* 2) Selecting non-consecutive metaslabs distributes the
* TRIM commands for a group evenly over the entire device.
* This can be advantageous for certain types of devices.
*/
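/*
* Worked example: with vdev_ms_count = 100 and zfs_trim_txg_batch = 32,
* the pass where shift % txgs_per_trim == 0 visits metaslabs 0, 32, 64
* and 96; the next pass visits 1, 33, 65 and 97; and so on, so each
* metaslab is revisited only once every 32 passes.
*/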
for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
i += txgs_per_trim) {
metaslab_t *msp = vd->vdev_ms[i];
- range_tree_t *trim_tree;
+ zfs_range_tree_t *trim_tree;
boolean_t issued_trim = B_FALSE;
boolean_t wait_aborted = B_FALSE;
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&msp->ms_lock);
/*
* Skip the metaslab when it has never been allocated
* or when there are no recent frees to trim.
*/
if (msp->ms_sm == NULL ||
- range_tree_is_empty(msp->ms_trim)) {
+ zfs_range_tree_is_empty(msp->ms_trim)) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
/*
* Skip the metaslab when it has already been disabled.
* This may happen when a manual TRIM or initialize
* operation is running concurrently. In the case
* of a manual TRIM, the ms_trim tree will have been
* vacated. Only ranges added after the manual TRIM
* disabled the metaslab will be included in the tree.
* These will be processed when the automatic TRIM
* next revisits this metaslab.
*/
if (msp->ms_disabled > 1) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
/*
* Allocate an empty range tree which is swapped in
* for the existing ms_trim tree while it is processed.
*/
- trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
- 0, 0);
- range_tree_swap(&msp->ms_trim, &trim_tree);
- ASSERT(range_tree_is_empty(msp->ms_trim));
+ trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
+ NULL, 0, 0);
+ zfs_range_tree_swap(&msp->ms_trim, &trim_tree);
+ ASSERT(zfs_range_tree_is_empty(msp->ms_trim));
/*
* There are two cases when constructing the per-vdev
* trim trees for a metaslab. If the top-level vdev
* has no children then it is also a leaf and should
* be trimmed. Otherwise our children are the leaves
* and a trim tree should be constructed for each.
*/
trim_args_t *tap;
uint64_t children = vd->vdev_children;
if (children == 0) {
children = 1;
tap = kmem_zalloc(sizeof (trim_args_t) *
children, KM_SLEEP);
tap[0].trim_vdev = vd;
} else {
tap = kmem_zalloc(sizeof (trim_args_t) *
children, KM_SLEEP);
for (uint64_t c = 0; c < children; c++) {
tap[c].trim_vdev = vd->vdev_child[c];
}
}
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
vdev_t *cvd = ta->trim_vdev;
ta->trim_msp = msp;
ta->trim_extent_bytes_max = extent_bytes_max;
ta->trim_extent_bytes_min = extent_bytes_min;
ta->trim_type = TRIM_TYPE_AUTO;
ta->trim_flags = 0;
if (cvd->vdev_detached ||
!vdev_writeable(cvd) ||
!cvd->vdev_has_trim ||
cvd->vdev_trim_thread != NULL) {
continue;
}
/*
* When a device has an attached hot spare, or
* is being replaced, it will not be trimmed.
* This is done to avoid adding additional
* stress to a potentially unhealthy device,
* and to minimize the required rebuild time.
*/
if (!cvd->vdev_ops->vdev_op_leaf)
continue;
- ta->trim_tree = range_tree_create(NULL,
- RANGE_SEG64, NULL, 0, 0);
- range_tree_walk(trim_tree,
+ ta->trim_tree = zfs_range_tree_create(NULL,
+ ZFS_RANGE_SEG64, NULL, 0, 0);
+ zfs_range_tree_walk(trim_tree,
vdev_trim_range_add, ta);
}
mutex_exit(&msp->ms_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* Issue the TRIM I/Os for all ranges covered by the
* TRIM trees. These ranges are safe to TRIM because
* no new allocations will be performed until the call
* to metaslab_enable() below.
*/
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
/*
* Always yield to a manual TRIM if one has
* been started for the child vdev.
*/
if (ta->trim_tree == NULL ||
ta->trim_vdev->vdev_trim_thread != NULL) {
continue;
}
/*
* After this point metaslab_enable() must be
* called with the sync flag set. This is done
* here because vdev_trim_ranges() is allowed
* to be interrupted (EINTR) before issuing all
* of the required TRIM I/Os.
*/
issued_trim = B_TRUE;
int error = vdev_trim_ranges(ta);
if (error)
break;
}
/*
* Verify every range which was trimmed is still
* contained within the ms_allocatable tree.
*/
if (zfs_flags & ZFS_DEBUG_TRIM) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
VERIFY3P(tap[0].trim_msp, ==, msp);
- range_tree_walk(trim_tree,
+ zfs_range_tree_walk(trim_tree,
vdev_trim_range_verify, &tap[0]);
mutex_exit(&msp->ms_lock);
}
- range_tree_vacate(trim_tree, NULL, NULL);
- range_tree_destroy(trim_tree);
+ zfs_range_tree_vacate(trim_tree, NULL, NULL);
+ zfs_range_tree_destroy(trim_tree);
/*
* Wait for a couple of kicks to ensure the trim I/O is
* synced. If the wait is aborted due to
* vdev_autotrim_exit_wanted, we need to signal
* metaslab_enable() to wait for sync.
*/
if (issued_trim) {
wait_aborted = vdev_autotrim_wait_kick(vd,
TXG_CONCURRENT_STATES + TXG_DEFER_SIZE);
}
metaslab_enable(msp, wait_aborted, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
if (ta->trim_tree == NULL)
continue;
- range_tree_vacate(ta->trim_tree, NULL, NULL);
- range_tree_destroy(ta->trim_tree);
+ zfs_range_tree_vacate(ta->trim_tree, NULL,
+ NULL);
+ zfs_range_tree_destroy(ta->trim_tree);
}
kmem_free(tap, sizeof (trim_args_t) * children);
if (vdev_autotrim_should_stop(vd))
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
vdev_autotrim_wait_kick(vd, 1);
shift++;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_trim_io_lock);
while (cvd->vdev_trim_inflight[1] > 0) {
cv_wait(&cvd->vdev_trim_io_cv,
&cvd->vdev_trim_io_lock);
}
mutex_exit(&cvd->vdev_trim_io_lock);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* When exiting because the autotrim property was set to off,
* abandon any unprocessed ms_trim ranges to reclaim the memory.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_OFF) {
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_ms[i];
mutex_enter(&msp->ms_lock);
- range_tree_vacate(msp->ms_trim, NULL, NULL);
+ zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock);
}
}
mutex_enter(&vd->vdev_autotrim_lock);
ASSERT(vd->vdev_autotrim_thread != NULL);
vd->vdev_autotrim_thread = NULL;
cv_broadcast(&vd->vdev_autotrim_cv);
mutex_exit(&vd->vdev_autotrim_lock);
thread_exit();
}
/*
* Starts an autotrim thread, if needed, for each top-level vdev which can be
* trimmed. A top-level vdev which has been evacuated will never be trimmed.
*/
void
vdev_autotrim(spa_t *spa)
{
vdev_t *root_vd = spa->spa_root_vdev;
for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
vdev_t *tvd = root_vd->vdev_child[i];
mutex_enter(&tvd->vdev_autotrim_lock);
if (vdev_writeable(tvd) && !tvd->vdev_removing &&
tvd->vdev_autotrim_thread == NULL &&
!tvd->vdev_rz_expanding) {
ASSERT3P(tvd->vdev_top, ==, tvd);
tvd->vdev_autotrim_thread = thread_create(NULL, 0,
vdev_autotrim_thread, tvd, 0, &p0, TS_RUN,
maxclsyspri);
ASSERT(tvd->vdev_autotrim_thread != NULL);
}
mutex_exit(&tvd->vdev_autotrim_lock);
}
}
/*
* Wait for the vdev_autotrim_thread associated with the passed top-level
* vdev to be terminated (canceled or stopped).
*/
void
vdev_autotrim_stop_wait(vdev_t *tvd)
{
mutex_enter(&tvd->vdev_autotrim_lock);
if (tvd->vdev_autotrim_thread != NULL) {
tvd->vdev_autotrim_exit_wanted = B_TRUE;
cv_broadcast(&tvd->vdev_autotrim_kick_cv);
cv_wait(&tvd->vdev_autotrim_cv,
&tvd->vdev_autotrim_lock);
ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
tvd->vdev_autotrim_exit_wanted = B_FALSE;
}
mutex_exit(&tvd->vdev_autotrim_lock);
}
void
vdev_autotrim_kick(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
vdev_t *root_vd = spa->spa_root_vdev;
vdev_t *tvd;
for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
tvd = root_vd->vdev_child[i];
mutex_enter(&tvd->vdev_autotrim_lock);
if (tvd->vdev_autotrim_thread != NULL)
cv_broadcast(&tvd->vdev_autotrim_kick_cv);
mutex_exit(&tvd->vdev_autotrim_lock);
}
}
/*
* Wait for all of the vdev_autotrim_threads associated with the pool to
* be terminated (canceled or stopped).
*/
void
vdev_autotrim_stop_all(spa_t *spa)
{
vdev_t *root_vd = spa->spa_root_vdev;
for (uint64_t i = 0; i < root_vd->vdev_children; i++)
vdev_autotrim_stop_wait(root_vd->vdev_child[i]);
}
/*
* Conditionally restart all of the vdev_autotrim_threads for the pool.
*/
void
vdev_autotrim_restart(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
spa->spa_load_thread == curthread);
if (spa->spa_autotrim)
vdev_autotrim(spa);
}
static __attribute__((noreturn)) void
vdev_trim_l2arc_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
l2arc_dev_t *dev = l2arc_vdev_get(vd);
trim_args_t ta = {0};
- range_seg64_t physical_rs;
+ zfs_range_seg64_t physical_rs;
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_trim_last_offset = 0;
vd->vdev_trim_rate = 0;
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
ta.trim_vdev = vd;
- ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
ta.trim_flags = 0;
physical_rs.rs_start = vd->vdev_trim_bytes_done = 0;
physical_rs.rs_end = vd->vdev_trim_bytes_est =
vdev_get_min_asize(vd);
- range_tree_add(ta.trim_tree, physical_rs.rs_start,
+ zfs_range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
mutex_exit(&vd->vdev_trim_lock);
(void) vdev_trim_ranges(&ta);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
- range_tree_vacate(ta.trim_tree, NULL, NULL);
- range_tree_destroy(ta.trim_tree);
+ zfs_range_tree_vacate(ta.trim_tree, NULL, NULL);
+ zfs_range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
ASSERT(vd->vdev_trim_thread != NULL ||
vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] == 0);
/*
* Drop the vdev_trim_lock while we sync out the txg since it's
* possible that a device might be trying to come online and
* must check to see if it needs to restart a trim. That thread
* will be holding the spa_config_lock which would prevent the
* txg_wait_synced from completing. Same strategy as in
* vdev_trim_thread().
*/
mutex_exit(&vd->vdev_trim_lock);
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
mutex_enter(&vd->vdev_trim_lock);
/*
* Update the header of the cache device here, before
* broadcasting vdev_trim_cv which may lead to the removal
* of the device. The same applies for setting l2ad_trim_all to
* false.
*/
spa_config_enter(vd->vdev_spa, SCL_L2ARC, vd,
RW_READER);
memset(dev->l2ad_dev_hdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
spa_config_exit(vd->vdev_spa, SCL_L2ARC, vd);
vd->vdev_trim_thread = NULL;
if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE)
dev->l2ad_trim_all = B_FALSE;
cv_broadcast(&vd->vdev_trim_cv);
mutex_exit(&vd->vdev_trim_lock);
thread_exit();
}
/*
* Punches out TRIM threads for the L2ARC devices in a spa and assigns them
* to the vd->vdev_trim_thread variable. This facilitates the management of
* trimming the whole cache device using TRIM_TYPE_MANUAL upon addition
* to a pool, at pool creation, or when the header of the device is invalid.
*/
void
vdev_trim_l2arc(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off TRIM threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_t *vd = spa->spa_l2cache.sav_vdevs[i];
l2arc_dev_t *dev = l2arc_vdev_get(vd);
if (dev == NULL || !dev->l2ad_trim_all) {
/*
* Don't attempt TRIM if the vdev is UNAVAIL or if the
* cache device was not marked for whole device TRIM
* (i.e. l2arc_trim_ahead = 0, or the L2ARC device header
* is valid with trim_state = VDEV_TRIM_COMPLETE and
* l2ad_log_entries > 0).
*/
continue;
}
mutex_enter(&vd->vdev_trim_lock);
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
vd->vdev_trim_thread = thread_create(NULL, 0,
vdev_trim_l2arc_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&vd->vdev_trim_lock);
}
}
/*
* A wrapper which calls vdev_trim_ranges(). It is intended to be called
* on leaf vdevs.
*/
int
vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
{
trim_args_t ta = {0};
- range_seg64_t physical_rs;
+ zfs_range_seg64_t physical_rs;
int error;
physical_rs.rs_start = start;
physical_rs.rs_end = start + size;
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_top->vdev_removing);
ASSERT(!vd->vdev_top->vdev_rz_expanding);
ta.trim_vdev = vd;
- ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
+ ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_SIMPLE;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
ta.trim_flags = 0;
ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);
if (physical_rs.rs_end > physical_rs.rs_start) {
- range_tree_add(ta.trim_tree, physical_rs.rs_start,
+ zfs_range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start);
} else {
ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
}
error = vdev_trim_ranges(&ta);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
- range_tree_vacate(ta.trim_tree, NULL, NULL);
- range_tree_destroy(ta.trim_tree);
+ zfs_range_tree_vacate(ta.trim_tree, NULL, NULL);
+ zfs_range_tree_destroy(ta.trim_tree);
return (error);
}
EXPORT_SYMBOL(vdev_trim);
EXPORT_SYMBOL(vdev_trim_stop);
EXPORT_SYMBOL(vdev_trim_stop_all);
EXPORT_SYMBOL(vdev_trim_stop_wait);
EXPORT_SYMBOL(vdev_trim_restart);
EXPORT_SYMBOL(vdev_autotrim);
EXPORT_SYMBOL(vdev_autotrim_stop_all);
EXPORT_SYMBOL(vdev_autotrim_stop_wait);
EXPORT_SYMBOL(vdev_autotrim_restart);
EXPORT_SYMBOL(vdev_trim_l2arc);
EXPORT_SYMBOL(vdev_trim_simple);
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_max, UINT, ZMOD_RW,
"Max size of TRIM commands, larger will be split");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_min, UINT, ZMOD_RW,
"Min size of TRIM commands, smaller will be skipped");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, metaslab_skip, UINT, ZMOD_RW,
"Skip metaslabs which have never been initialized");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, txg_batch, UINT, ZMOD_RW,
"Min number of txgs to aggregate frees before issuing TRIM");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, queue_limit, UINT, ZMOD_RW,
"Max queued TRIMs outstanding per leaf vdev");
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index bd6752f00ac5..b071ac17ed1f 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -1,5728 +1,5816 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
- * Copyright (c) 2019, 2023, 2024, Klara Inc.
+ * Copyright (c) 2019, 2023, 2024, 2025, Klara, Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, Datto, Inc.
* Copyright (c) 2021, 2024 by George Melikov. All rights reserved.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *const zio_type_name[ZIO_TYPES] = {
/*
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream OpenZFS.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*
* Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
* compression (including of metadata). In practice, we don't have this
* many sync passes, so this has no effect.
*
* The original intent was that disabling compression would help the sync
* passes to converge. However, in practice disabling compression increases
* the average number of sync passes, because when we turn compression off, a
* lot of block's size will change and thus we have to re-allocate (not
* overwrite) them. It also increases the number of 128KB allocations (e.g.
* for indirect blocks and spacemaps) because these will not be compressed.
* The 128K allocations are especially detrimental to performance on highly
* fragmented systems, which may have very few free segments of this size,
* and may need to load new metaslabs to satisfy 128K allocations.
*/
/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;
/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;
/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
/*
* Enable smaller cores by excluding metadata
* allocations as well.
*/
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif
+typedef struct zio_stats {
+ kstat_named_t ziostat_total_allocations;
+ kstat_named_t ziostat_alloc_class_fallbacks;
+ kstat_named_t ziostat_gang_writes;
+ kstat_named_t ziostat_gang_multilevel;
+} zio_stats_t;
+
+static zio_stats_t zio_stats = {
+ { "total_allocations", KSTAT_DATA_UINT64 },
+ { "alloc_class_fallbacks", KSTAT_DATA_UINT64 },
+ { "gang_writes", KSTAT_DATA_UINT64 },
+ { "gang_multilevel", KSTAT_DATA_UINT64 },
+};
+
+struct {
+ wmsum_t ziostat_total_allocations;
+ wmsum_t ziostat_alloc_class_fallbacks;
+ wmsum_t ziostat_gang_writes;
+ wmsum_t ziostat_gang_multilevel;
+} ziostat_sums;
+
+#define ZIOSTAT_BUMP(stat) wmsum_add(&ziostat_sums.stat, 1);
+
+static kstat_t *zio_ksp;
+
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
+static int
+zio_kstats_update(kstat_t *ksp, int rw)
+{
+ zio_stats_t *zs = ksp->ks_data;
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+
+ zs->ziostat_total_allocations.value.ui64 =
+ wmsum_value(&ziostat_sums.ziostat_total_allocations);
+ zs->ziostat_alloc_class_fallbacks.value.ui64 =
+ wmsum_value(&ziostat_sums.ziostat_alloc_class_fallbacks);
+ zs->ziostat_gang_writes.value.ui64 =
+ wmsum_value(&ziostat_sums.ziostat_gang_writes);
+ zs->ziostat_gang_multilevel.value.ui64 =
+ wmsum_value(&ziostat_sums.ziostat_gang_multilevel);
+ return (0);
+}
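+
+/*
+ * A minimal usage sketch; the helper below is hypothetical. Allocation
+ * sites are expected to bump the per-CPU wmsum counters through
+ * ZIOSTAT_BUMP(), and zio_kstats_update() folds them into the kstat
+ * when it is read.
+ */
+static inline void
+zio_stats_bump_sketch(void)
+{
+ /* Count one gang write; wmsum_add() keeps the hot path cheap. */
+ ZIOSTAT_BUMP(ziostat_gang_writes);
+}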
+
void
zio_init(void)
{
size_t c;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+ wmsum_init(&ziostat_sums.ziostat_total_allocations, 0);
+ wmsum_init(&ziostat_sums.ziostat_alloc_class_fallbacks, 0);
+ wmsum_init(&ziostat_sums.ziostat_gang_writes, 0);
+ wmsum_init(&ziostat_sums.ziostat_gang_multilevel, 0);
+ zio_ksp = kstat_create("zfs", 0, "zio_stats",
+ "misc", KSTAT_TYPE_NAMED, sizeof (zio_stats) /
+ sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
+ if (zio_ksp != NULL) {
+ zio_ksp->ks_data = &zio_stats;
+ zio_ksp->ks_update = zio_kstats_update;
+ kstat_install(zio_ksp);
+ }
+
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t align, cflags, data_cflags;
char name[32];
/*
* Create cache for each half-power of 2 size, starting from
* SPA_MINBLOCKSIZE. It should give us memory space efficiency
* of ~7/8, sufficient for transient allocations mostly using
* these caches.
*/
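/*
* For example, with SPA_MINBLOCKSIZE = 512 this creates caches of
* 512, 1024, 1536, 2048, 3072, 4096, 6144, 8192, ... bytes, i.e. every
* power of two plus the midpoint between consecutive powers of two.
*/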
size_t p2 = size;
while (!ISP2(p2))
p2 &= p2 - 1;
if (!IS_P2ALIGNED(size, p2 / 2))
continue;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
#endif
if (IS_P2ALIGNED(size, PAGESIZE))
align = PAGESIZE;
else
align = 1 << (highbit64(size ^ (size - 1)) - 1);
cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
KMC_NODEBUG : 0;
data_cflags = KMC_NODEBUG;
if (abd_size_alloc_linear(size)) {
cflags |= KMC_RECLAIMABLE;
data_cflags |= KMC_RECLAIMABLE;
}
if (cflags == data_cflags) {
/*
* Resulting kmem caches would be identical.
* Save memory by creating only one.
*/
(void) snprintf(name, sizeof (name),
"zio_buf_comb_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size, align,
NULL, NULL, NULL, NULL, NULL, cflags);
zio_data_buf_cache[c] = zio_buf_cache[c];
continue;
}
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size, align,
NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
NULL, NULL, NULL, NULL, NULL, data_cflags);
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
void
zio_fini(void)
{
size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
for (size_t i = 0; i < n; i++) {
if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((i + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[i],
(long long unsigned)zio_buf_cache_frees[i]);
}
#endif
/*
* The same kmem cache can show up multiple times in both zio_buf_cache
* and zio_data_buf_cache. Do a wasteful but trivially correct scan to
* sort it out.
*/
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_buf_cache[j])
zio_buf_cache[j] = NULL;
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_data_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
VERIFY3P(zio_buf_cache[i], ==, NULL);
VERIFY3P(zio_data_buf_cache[i], ==, NULL);
}
+ if (zio_ksp != NULL) {
+ kstat_delete(zio_ksp);
+ zio_ksp = NULL;
+ }
+
+ wmsum_fini(&ziostat_sums.ziostat_total_allocations);
+ wmsum_fini(&ziostat_sums.ziostat_alloc_class_fallbacks);
+ wmsum_fini(&ziostat_sums.ziostat_gang_writes);
+ wmsum_fini(&ziostat_sums.ziostat_gang_multilevel);
+
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
#if defined(ZFS_DEBUG) && defined(_KERNEL)
#define ZFS_ZIO_BUF_CANARY 1
#endif
#ifdef ZFS_ZIO_BUF_CANARY
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
/*
* Use empty space after the buffer to detect overflows.
*
* Since zio_init() creates kmem caches only for a certain set of buffer sizes,
* allocations of different sizes may have some unused space after the data.
* Filling part of that space with a known pattern on allocation and checking
* it on free should allow us to detect some buffer overflows.
*/
static void
zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
size_t off = P2ROUNDUP(size, sizeof (ulong_t));
ulong_t *canary = p + off / sizeof (ulong_t);
size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
cache[c] == cache[c + 1])
asize = (c + 2) << SPA_MINBLOCKSHIFT;
for (; off < asize; canary++, off += sizeof (ulong_t))
*canary = zio_buf_canary;
}
static void
zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
size_t off = P2ROUNDUP(size, sizeof (ulong_t));
ulong_t *canary = p + off / sizeof (ulong_t);
size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
cache[c] == cache[c + 1])
asize = (c + 2) << SPA_MINBLOCKSHIFT;
for (; off < asize; canary++, off += sizeof (ulong_t)) {
if (unlikely(*canary != zio_buf_canary)) {
PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
p, size, (canary - p) * sizeof (ulong_t),
*canary, zio_buf_canary);
}
}
}
#endif
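/*
* Worked example (sizes are illustrative): zio_buf_alloc(1600) is served
* from the 2048-byte zio_buf cache, so zio_buf_put_canary() fills the
* trailing 448 bytes with zio_buf_canary and zio_buf_check_canary()
* re-verifies them on free; a write past the requested 1600 bytes then
* triggers the "ZIO buffer overflow" panic above.
*/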
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
zio_buf_put_canary(p, size, zio_buf_cache, c);
#endif
return (p);
}
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists so that we will limit the amount
* of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
* of kernel heap dumped to disk when the kernel panics)
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
zio_buf_put_canary(p, size, zio_data_buf_cache, c);
#endif
return (p);
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
#ifdef ZFS_ZIO_BUF_CANARY
zio_buf_check_canary(buf, size, zio_buf_cache, c);
#endif
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#ifdef ZFS_ZIO_BUF_CANARY
zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
#endif
kmem_cache_free(zio_data_buf_cache[c], buf);
}
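/*
* A minimal usage sketch; the helper below is hypothetical. A buffer must
* be released through the matching free routine with the same size that
* was passed to the allocator, since the size selects the kmem cache (and,
* with ZFS_ZIO_BUF_CANARY, the canary region verified on free).
*/
static void
zio_buf_roundtrip_sketch(void)
{
void *p = zio_buf_alloc(4096);
/* ... use up to 4096 bytes of metadata ... */
zio_buf_free(p, 4096);
}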
static void
zio_abd_free(void *abd, size_t size)
{
(void) size;
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, data, zio->io_size, size,
&zio->io_prop.zp_complevel);
if (zio_injection_enabled && ret == 0)
ret = zio_handle_fault_injection(zio, EINVAL);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
abd_t *abd = abd_alloc_linear(lsize, B_TRUE);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, abd, zio->io_size, lsize,
&zio->io_prop.zp_complevel);
if (ret != 0) {
abd_free(abd);
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
abd, lsize, BP_SHOULD_BYTESWAP(bp), mac);
abd_free(abd);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but when this was done,
* we had run out of bits in what is now zio_flag_t. Future cleanup
* could make this a flag bit.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
if (zio_injection_enabled && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
zio->io_abd, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, &zio->io_bookmark,
BP_GET_LOGICAL_BIRTH(zio->io_bp));
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
void
zio_add_child(zio_t *pio, zio_t *cio)
{
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
/* Parent should not have READY stage if child doesn't have it. */
IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
(cio->io_child_type != ZIO_CHILD_VDEV),
(pio->io_pipeline & ZIO_STAGE_READY) == 0);
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
countp[w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
/* Parent should not have READY stage if child doesn't have it. */
IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
(cio->io_child_type != ZIO_CHILD_VDEV),
(pio->io_pipeline & ZIO_STAGE_READY) == 0);
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
zl->zl_parent = pio;
zl->zl_child = cio;
ASSERT(list_is_empty(&cio->io_parent_list));
list_insert_head(&cio->io_parent_list, zl);
mutex_enter(&pio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
countp[w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
/*
* Propagate the Direct I/O checksum verify failure to the parent.
*/
if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
pio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* If we can tell the caller to execute this parent next, do
* so. We do this if the parent's zio type matches the child's
* type, or if it's a zio_null() with no done callback, and so
* has no actual work to do. Otherwise dispatch the parent zio
* in its own taskq.
*
* Having the caller execute the parent when possible reduces
* locking on the zio taskq's, reduces context switch
* overhead, and has no recursion penalty. Note that one
* read from disk typically causes at least 3 zio's: a
* zio_null(), the logical zio_read(), and then a physical
* zio. When the physical ZIO completes, we are able to call
* zio_done() on all 3 of these zio's from one invocation of
* zio_execute() by returning the parent back to
* zio_execute(). Since the parent isn't executed until this
* thread returns back to zio_execute(), the caller should do
* so promptly.
*
* In other cases, dispatching the parent prevents
* overflowing the stack when we have deeply nested
* parent-child relationships, as we do with the "mega zio"
* of writes for spa_sync(), and the chain of ZIL blocks.
*/
if (next_to_executep != NULL && *next_to_executep == NULL &&
(pio->io_type == zio->io_type ||
(pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
*next_to_executep = pio;
} else {
zio_taskq_dispatch(pio, type, B_FALSE);
}
} else {
mutex_exit(&pio->io_lock);
}
}
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
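/*
* Note (illustrative addition, not from the original source): the
* comparator above defines a total order on zios by logical location
* (objset, object, level, blkid), falling back to the zio pointers
* themselves so that two distinct zios never compare equal. That
* property makes it usable as a comparator for sorted containers
* (e.g. AVL trees) keyed by bookmark.
*/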
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
zio_flag_t flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
memset(zio, 0, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT) {
zio->io_bp_copy = *bp;
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
} else {
zio->io_bp = (blkptr_t *)bp;
}
zio->io_bp_orig = *bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_allocator = ZIO_ALLOCATOR_NONE;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
(pipeline & ZIO_STAGE_READY) == 0;
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child_first(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
/*
* A ZIO intended to sit between others in the tree. It provides
* synchronization at the READY and DONE pipeline stages and calls the
* respective callbacks.
*/
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, zio_flag_t flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
/*
* A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
* have a READY pipeline stage (it is ready on creation), so it should not
* be used as a child of any ZIO that may need to wait for its grandchildren
* to reach the READY stage (i.e. any other ZIO type).
*/
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
return (zio);
}
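/*
* Illustrative usage sketch (hypothetical caller, not part of the
* original file): a common pattern is to create a root zio, hang
* asynchronous children off it, and then wait on the root, which the
* parent/child interlocks turn into "wait for everything":
*
*	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
*	for (int i = 0; i < nbps; i++)
*		zio_nowait(zio_read(rio, spa, &bps[i], abds[i],
*		    BP_GET_PSIZE(&bps[i]), NULL, NULL,
*		    ZIO_PRIORITY_SYNC_READ, 0, &zb));
*	int error = zio_wait(rio);
*
* The variable names (nbps, bps, abds, zb) are invented for the example.
*/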
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
enum blk_verify_flag blk_verify, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("bad blkptr at %px: "
"DVA[0]=%#llx/%#llx "
"DVA[1]=%#llx/%#llx "
"DVA[2]=%#llx/%#llx "
"prop=%#llx "
"pad=%#llx,%#llx "
"phys_birth=%#llx "
"birth=%#llx "
"fill=%#llx "
"cksum=%#llx/%#llx/%#llx/%#llx",
bp,
(long long)bp->blk_dva[0].dva_word[0],
(long long)bp->blk_dva[0].dva_word[1],
(long long)bp->blk_dva[1].dva_word[0],
(long long)bp->blk_dva[1].dva_word[1],
(long long)bp->blk_dva[2].dva_word[0],
(long long)bp->blk_dva[2].dva_word[1],
(long long)bp->blk_prop,
(long long)bp->blk_pad[0],
(long long)bp->blk_pad[1],
(long long)BP_GET_PHYSICAL_BIRTH(bp),
(long long)BP_GET_LOGICAL_BIRTH(bp),
(long long)bp->blk_fill,
(long long)bp->blk_cksum.zc_word[0],
(long long)bp->blk_cksum.zc_word[1],
(long long)bp->blk_cksum.zc_word[2],
(long long)bp->blk_cksum.zc_word[3]);
switch (blk_verify) {
case BLK_VERIFY_HALT:
zfs_panic_recover("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_LOG:
zfs_dbgmsg("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_ONLY:
break;
}
return (1);
}
/*
* Verify the block pointer fields contain reasonable values. This means
* it only contains known object types, checksum/compression identifiers,
* block sizes within the maximum allowed limits, valid DVAs, etc.
*
* If everything checks out, B_TRUE is returned. The blk_verify argument
* controls the behavior when an invalid field is detected.
*
* Values for blk_verify_flag:
* BLK_VERIFY_ONLY: evaluate the block
* BLK_VERIFY_LOG: evaluate the block and log problems
* BLK_VERIFY_HALT: call zfs_panic_recover on error
*
* Values for blk_config_flag:
* BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
* BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
* obtained for reader
* BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
* performance
*/
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
int errors = 0;
if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid PSIZE %llu",
bp, (longlong_t)BPE_GET_PSIZE(bp));
}
return (errors == 0);
}
if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
/*
* Do not verify individual DVAs if the config is not trusted. This
* will be done once the zio is executed in vdev_mirror_map_alloc.
*/
if (unlikely(!spa->spa_trust_config))
return (errors == 0);
switch (blk_config) {
case BLK_CONFIG_HELD:
ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
break;
case BLK_CONFIG_NEEDED:
spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
break;
case BLK_CONFIG_SKIP:
return (errors == 0);
default:
panic("invalid blk_config %u", blk_config);
}
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the logical birth
* and physical birth are not too large. However,
* spa_freeze() allows the birth time of log blocks (and
* dmu_sync()-ed blocks that are in the log) to be arbitrarily
* large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
const dva_t *dva = &bp->blk_dva[i];
uint64_t vdevid = DVA_GET_VDEV(dva);
if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (unlikely(vd == NULL)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has hole VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (unlikely(offset + asize > vd->vdev_asize)) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
if (blk_config == BLK_CONFIG_NEEDED)
spa_config_exit(spa, SCL_VDEV, bp);
return (errors == 0);
}
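/*
* Illustrative usage (hedged, hypothetical caller): a consumer that only
* wants problems logged, and that neither holds nor wants to take the
* config lock, could call:
*
*	if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_SKIP, BLK_VERIFY_LOG))
*		return (SET_ERROR(ECKSUM));
*
* The real callers in this file, e.g. zio_free() and zio_claim() below,
* use BLK_CONFIG_NEEDED or BLK_CONFIG_HELD with BLK_VERIFY_HALT.
*/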
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
(void) bp;
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
return (B_FALSE);
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL)
return (B_FALSE);
if (vd->vdev_ops == &vdev_hole_ops)
return (B_FALSE);
if (vd->vdev_ops == &vdev_missing_ops) {
return (B_FALSE);
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize)
return (B_FALSE);
return (B_TRUE);
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
enum zio_stage pipeline = zp->zp_direct_write == B_TRUE ?
ZIO_DIRECT_WRITE_PIPELINE : (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE;
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, pipeline);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
boolean_t brtwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
ASSERT(!brtwrite || !nopwrite);
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync() keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_brtwrite = brtwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Frees that are for the currently-syncing txg, that are not going to be
* deferred, and that will not need to do a read (i.e. not GANG or
* DEDUP) can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*
* Note that we only defer frees after zfs_sync_pass_deferred_free
* when the log space map feature is disabled. [see relevant comment
* in spa_sync_iterate_to_convergence()]
*/
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
brt_maybe_exists(spa, bp)) {
metaslab_check_free(spa, bp);
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
}
}
/*
* To improve performance, this function may return NULL if we were able
* to do the free immediately. This avoids the cost of creating a zio
* (and linking it to the parent, etc).
*/
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_flag_t flags)
{
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
if (BP_IS_EMBEDDED(bp))
return (NULL);
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
brt_maybe_exists(spa, bp)) {
/*
* GANG, DEDUP and BRT blocks can induce a read (for the gang
* block header, the DDT or the BRT), so issue them
* asynchronously so that this thread is not tied up.
*/
enum zio_stage stage =
ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL,
ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
} else {
metaslab_free(spa, bp, txg, B_FALSE);
return (NULL);
}
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
spa_min_claim_txg(spa));
ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, enum trim_flag trim_flags)
{
zio_t *zio;
ASSERT0(vd->vdev_children);
ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
ASSERT3U(size, !=, 0);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
zio->io_trim_flags = trim_flags;
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
zio_flag_t flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*
* vdev child I/Os do not propagate their error to the parent.
* Therefore, for correct operation the caller *must* check for
* and handle the error in the child i/o's done callback.
* The only exceptions are i/os that we don't care about
* (OPTIONAL or REPAIR).
*/
ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
done != NULL);
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
/*
* We never allow the mirror VDEV to attempt reading from any
* additional data copies after the first Direct I/O checksum
* verify failure. This is to avoid bad data being written out
* through the mirror during self healing. See comment in
* vdev_mirror_io_done() for more details.
*/
ASSERT0(pio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
} else if (type == ZIO_TYPE_WRITE &&
pio->io_prop.zp_direct_write == B_TRUE) {
/*
* By default we only will verify checksums for Direct I/O
* writes for Linux. FreeBSD is able to place user pages under
* write protection before issuing them to the ZIO pipeline.
*
* Checksum validation errors will only be reported through
* the top-level VDEV, which is set by this child ZIO.
*/
ASSERT3P(bp, !=, NULL);
ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
pipeline |= ZIO_STAGE_DIO_CHECKSUM_VERIFY;
}
if (vd->vdev_ops->vdev_op_leaf) {
ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
}
flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERT(pio->io_metaslab_class != NULL);
ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
return (zio);
}
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
zio_type_t type, zio_priority_t priority, zio_flag_t flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
/*
* Send a flush command to the given vdev. Unlike most zio creation functions,
* the flush zios are issued immediately. Waiting on pio will pause the
* caller until the flushes complete.
*/
void
zio_flush(zio_t *pio, vdev_t *vd)
{
const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY;
if (vd->vdev_nowritecache)
return;
if (vd->vdev_children == 0) {
zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
} else {
for (uint64_t c = 0; c < vd->vdev_children; c++)
zio_flush(pio, vd->vdev_child[c]);
}
}
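/*
* Illustrative usage sketch (hedged, hypothetical caller): because the
* flush zios are issued immediately, a caller typically brackets them
* with a root zio and waits on that:
*
*	zio_t *pio = zio_root(spa, NULL, NULL, 0);
*	zio_flush(pio, spa->spa_root_vdev);
*	(void) zio_wait(pio);
*/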
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
/*
* Round the provided allocation size up to a value that can be allocated
* by at least some vdev(s) in the pool with minimum or no additional
* padding and without extra space usage on others.
*/
static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
{
if (size > spa->spa_min_alloc)
return (roundup(size, spa->spa_gcd_alloc));
return (spa->spa_min_alloc);
}
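/*
* Worked example for zio_roundup_alloc_size() above (illustrative,
* hypothetical pool geometry): with spa_min_alloc = 4096 and
* spa_gcd_alloc = 4096, a request of 6000 bytes is rounded up to 8192,
* while a request of 1000 bytes (at or below the minimum) returns 4096.
*/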
size_t
zio_get_compression_max_size(enum zio_compress compress, uint64_t gcd_alloc,
uint64_t min_alloc, size_t s_len)
{
size_t d_len;
/* minimum 12.5% must be saved (legacy value, may be changed later) */
d_len = s_len - (s_len >> 3);
/* ZLE can't use exactly d_len bytes, it needs more, so ignore it */
if (compress == ZIO_COMPRESS_ZLE)
return (d_len);
d_len = d_len - d_len % gcd_alloc;
if (d_len < min_alloc)
return (BPE_PAYLOAD_SIZE);
return (d_len);
}
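/*
* Worked example for zio_get_compression_max_size() above (illustrative,
* hypothetical values): for a 128 KiB source block (s_len = 131072) on a
* pool with gcd_alloc = min_alloc = 4096 and a non-ZLE compressor, the
* 12.5% saving requirement gives d_len = 131072 - 16384 = 114688, which
* is already a multiple of 4096 and above min_alloc, so the compressor
* must produce at most 114688 bytes for compression to be kept.
*/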
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
}
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (zio);
}
static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zp->zp_brtwrite)
return (zio);
ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
if (BP_IS_EMBEDDED(bp))
return (zio);
/*
* If we've been overridden and nopwrite is set then
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (zio);
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (zio);
}
static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
uint32_t pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
return (zio);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
MIN(zp->zp_copies, spa_max_replication(spa))
== BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
abd_t *cabd = NULL;
if (abd_cmp_zero(zio->io_abd, lsize) == 0)
psize = 0;
else if (compress == ZIO_COMPRESS_EMPTY)
psize = lsize;
else
psize = zio_compress_data(compress, zio->io_abd, &cabd,
lsize,
zio_get_compression_max_size(compress,
spa->spa_gcd_alloc, spa->spa_min_alloc, lsize),
zp->zp_complevel);
if (psize == 0) {
compress = ZIO_COMPRESS_OFF;
} else if (psize >= lsize) {
compress = ZIO_COMPRESS_OFF;
if (cabd != NULL)
abd_free(cabd);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
void *cbuf = abd_borrow_buf_copy(cabd, lsize);
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
abd_return_buf(cabd, cbuf, lsize);
abd_free(cabd);
BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (zio);
} else {
/*
* Round compressed size up to the minimum allocation
* size of the smallest-ashift device, and zero the
* tail. This ensures that the compressed size of the
* BP (and thus compressratio property) are correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
size_t rounded = (size_t)zio_roundup_alloc_size(spa,
psize);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
abd_free(cabd);
psize = lsize;
} else {
abd_zero_off(cabd, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cabd,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
if (abd_cmp_zero(zio->io_abd, lsize) == 0) {
psize = 0;
compress = ZIO_COMPRESS_OFF;
} else {
psize = lsize;
}
} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
!(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
/*
* If we are raw receiving an encrypted dataset we should not
* take this codepath because it will change the on-disk block
* and decryption will fail.
*/
size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
lsize);
if (rounded != psize) {
abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, rounded, NULL);
}
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (zio);
}
static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
return (zio);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available or cut the line otherwise.
*/
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
else
cutinline = B_TRUE;
}
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
spa_t *spa = zio->io_spa;
taskq_t *tq = taskq_of_curthread();
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (tqs->stqs_taskq[i] == tq)
return (B_TRUE);
}
}
return (B_FALSE);
}
static zio_t *
zio_issue_async(zio_t *zio)
{
ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
void
zio_interrupt(void *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this IO, so jump to the end of this function and "skip" the
* delay, issuing it directly to the zio layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
int ticks = MAX(1, NSEC_TO_TICK(diff));
clock_t expire_at_tick = ddi_get_lbolt() + ticks;
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
tid = taskq_dispatch_delay(system_taskq, zio_interrupt,
zio, TQ_NOSLEEP, expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just finish the
* zio without a delay.
*/
zio_interrupt(zio);
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s "
"last=%llu type=%d "
"priority=%d flags=0x%llx stage=0x%x "
"pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu "
"level=%llu blkid=%llu "
"offset=%llu size=%llu "
"error=%d",
ziodepth, pio, pio->io_timestamp,
(u_longlong_t)delta, pio->io_delta, pio->io_delay,
vd ? vd->vdev_path : "NULL",
vq ? vq->vq_io_complete_ts : 0, pio->io_type,
pio->io_priority, (u_longlong_t)pio->io_flags,
pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
(u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio, ziodepth + 1);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface then post deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, const char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio, 0);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead, which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(void *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine if in the current context the stack is sized large
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
#else
(void) zio;
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
/*
* The zio pipeline stage returns the next zio to execute
* (typically the same as this one), or NULL if we should
* stop.
*/
zio = zio_pipeline[highbit64(stage) - 1](zio);
if (zio == NULL)
return;
}
}
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
/*
* Some routines, like zio_free_sync(), may return a NULL zio
* to avoid the performance overhead of creating and then destroying
* an unneeded zio. For the callers' simplicity, we accept a NULL
* zio and ignore it.
*/
if (zio == NULL)
return (0);
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
if (zio->io_type == ZIO_TYPE_WRITE) {
spa_select_allocator(zio);
}
__zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
void
zio_nowait(zio_t *zio)
{
/*
* See comment in zio_wait().
*/
if (zio == NULL)
return;
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
list_is_empty(&zio->io_parent_list)) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_root_zio "Godfather" I/O which
* will ensure it completes prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
if (zio->io_type == ZIO_TYPE_WRITE) {
spa_select_allocator(zio);
}
__zio_execute(zio);
}
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(void *arg)
{
zio_t *pio = arg;
zio_t *cio, *cio_next, *gio;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
mutex_enter(&pio->io_lock);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
(pio->io_pipeline & ZIO_STAGE_READY) == 0;
pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
+
+ /*
+ * It's possible for a failed ZIO to be a descendant of more than one
+ * ZIO tree. When reexecuting it, we have to be sure to add its wait
+ * states to all parent wait counts.
+ *
+ * Those parents, in turn, may have other children that are currently
+ * active, usually because they've already been reexecuted after
+ * resuming. Those children may be executing and may call
+ * zio_notify_parent() at the same time as we're updating our parent's
+ * counts. To avoid races while updating the counts, we take
+ * gio->io_lock before each update.
+ */
zio_link_t *zl = NULL;
while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
+ mutex_enter(&gio->io_lock);
for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
gio->io_children[pio->io_child_type][w] +=
!pio->io_state[w];
}
+ mutex_exit(&gio->io_lock);
}
+
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zl = NULL;
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
if (reason != ZIO_SUSPEND_MMP) {
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
"I/O failure and has been suspended.", spa_name(spa));
}
(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspended != ZIO_SUSPEND_NONE)
cmn_err(CE_WARN, "Pool '%s' was suspended and is being "
"resumed. Failed I/O will be retried.",
spa_name(spa));
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which the root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
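/*
* Illustrative example (hedged addition, not part of the original
* comment): a 128K logical block that cannot be allocated contiguously
* might be stored as a gang block whose header carries up to
* SPA_GBH_NBLKPTRS (three) bps, e.g. two 48K fragments and one 32K
* fragment. If one of those fragments in turn fails to allocate, it can
* itself become a nested gang block, producing the small tree of gang
* headers and data leaves that zio_gang_assemble() rebuilds in
* io_gang_tree.
*/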
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_free(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_free(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
zio = zio_null(pio, pio->io_spa,
NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
}
return (zio);
}
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(list_is_empty(&zio->io_child_list));
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_free(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (zio);
}
static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
static void
zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
{
cio->io_allocator = pio->io_allocator;
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
zio_t *gio __maybe_unused = zio->io_gang_leader;
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
/*
* The io_abd field will be NULL for a zio with no data. The io_flags
* will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
* check for it here as it is cleared in zio_ready.
*/
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
spa_t *spa = pio->io_spa;
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
zio_prop_t zp;
int error;
boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
/*
* If one copy was requested, store 2 copies of the GBH, so that we
* can still traverse all the data (e.g. to free or scrub) even if a
* block is damaged. Note that we can't store 3 copies of the GBH in
* all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
*/
int gbh_copies = copies;
if (gbh_copies == 1) {
gbh_copies = MIN(2, spa_max_replication(spa));
}
ASSERT(ZIO_HAS_ALLOCATOR(pio));
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
mca_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
return (pio);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
memset(gbh, 0, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
zio_gang_inherit_allocator(pio, zio);
/*
* Create and nowait the gang children.
*/
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_complevel = gio->io_prop.zp_complevel;
zp.zp_type = zp.zp_storage_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
zp.zp_direct_write = B_FALSE;
memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
resid) : NULL, lsize, lsize, &zp,
zio_write_gang_member_ready, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
zio_gang_inherit_allocator(zio, cio);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio_nowait(zio);
return (pio);
}
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
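/*
 * Illustrative example: rewriting an unchanged record on a dataset using a
 * nopwrite-capable checksum (e.g. sha256) with compression enabled yields a
 * new bp whose blk_cksum equals bp_orig's, so zio_nop_write() below copies
 * bp_orig into the bp, collapses the pipeline to ZIO_INTERLOCK_PIPELINE, and
 * no new DVA is allocated or written.
 */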
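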
static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_IS_HOLE(bp));
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (zio);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
/*
* If we're overwriting a block that is currently on an
* indirect vdev, then ignore the nopwrite request and
* allow a new block to be allocated on a concrete vdev.
*/
spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
vdev_t *tvd = vdev_lookup_top(zio->io_spa,
DVA_GET_VDEV(&bp_orig->blk_dva[d]));
if (tvd->vdev_ops == &vdev_indirect_ops) {
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
return (zio);
}
}
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (zio);
}
/*
* ==========================================================================
* Block Reference Table
* ==========================================================================
*/
static zio_t *
zio_brt_free(zio_t *zio)
{
blkptr_t *bp;
bp = zio->io_bp;
if (BP_GET_LEVEL(bp) > 0 ||
BP_IS_METADATA(bp) ||
!brt_maybe_exists(zio->io_spa, bp)) {
return (zio);
}
if (!brt_entry_decref(zio->io_spa, bp)) {
/*
* This isn't the last reference, so we cannot free
* the data yet.
*/
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
}
return (zio);
}
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_t *ddt;
ddt_entry_t *dde = zio->io_private;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddt = ddt_select(zio->io_spa, bp);
if (zio->io_error == 0) {
ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
/* this phys variant doesn't need repair */
ddt_phys_clear(dde->dde_phys, v);
}
if (zio->io_error == 0 && dde->dde_io->dde_repair_abd == NULL)
dde->dde_io->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_variant_t v_self = ddt_phys_select(ddt, dde, bp);
ddt_univ_phys_t *ddp = dde->dde_phys;
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (v_self == DDT_PHYS_NONE)
return (zio);
/* issue I/O for the other copies */
for (int p = 0; p < DDT_NPHYS(ddt); p++) {
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
if (ddt_phys_birth(ddp, v) == 0 || v == v_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key,
ddp, v, &blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (zio);
}
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
if (dde->dde_io->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_io->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (zio);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio so in these
* cases we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = 0; p < DDT_NPHYS(ddt); p++) {
if (DDT_PHYS_IS_DITTO(ddt, p))
continue;
if (dde->dde_io == NULL)
continue;
zio_t *lio = dde->dde_io->dde_lead_zio[p];
if (lio == NULL)
continue;
if (do_raw)
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
for (int p = 0; p < DDT_NPHYS(ddt); p++) {
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
uint64_t phys_birth = ddt_phys_birth(dde->dde_phys, v);
if (phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
zio_link_t *zl = NULL;
ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
ddt_univ_phys_t *ddp = dde->dde_phys;
ddt_enter(ddt);
/* we're the lead, so once we're done there's no one else outstanding */
if (dde->dde_io->dde_lead_zio[p] == zio)
dde->dde_io->dde_lead_zio[p] = NULL;
ddt_univ_phys_t *orig = &dde->dde_io->dde_orig_phys;
if (zio->io_error != 0) {
/*
* The write failed, so we're about to abort the entire IO
* chain. We need to revert the entry back to what it was at
* the last time it was successfully extended.
*/
ddt_phys_copy(ddp, orig, v);
ddt_phys_clear(orig, v);
ddt_exit(ddt);
return;
}
/*
* We've successfully added new DVAs to the entry. Clear the saved
* state or, if there's still outstanding IO, remember it so we can
* revert to a known good state if that IO fails.
*/
if (dde->dde_io->dde_lead_zio[p] == NULL)
ddt_phys_clear(orig, v);
else
ddt_phys_copy(orig, ddp, v);
/*
* Add references for all dedup writes that were waiting on the
* physical one, skipping any other physical writes that are waiting.
*/
zio_t *pio;
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
ddt_phys_addref(ddp, v);
}
ddt_exit(ddt);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
zio_link_t *zl = NULL;
ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
if (zio->io_error != 0)
return;
ddt_enter(ddt);
ddt_phys_extend(dde->dde_phys, v, zio->io_bp);
zio_t *pio;
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
}
ddt_exit(ddt);
}
static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
/*
* Deduplication will not take place for Direct I/O writes. The
* ddt_tree will be emptied in syncing context. Direct I/O writes take
* place in open context. A Direct I/O write cannot attempt to
* modify the ddt_tree while issuing a write.
*/
ASSERT3B(zio->io_prop.zp_direct_write, ==, B_FALSE);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp);
if (dde == NULL) {
/* DDT size is over its quota so no new entries */
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
if (zio->io_bp_override == NULL)
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
int p = DDT_PHYS_FOR_COPIES(ddt, zp->zp_copies);
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
ddt_univ_phys_t *ddp = dde->dde_phys;
/*
* In the common cases, at this point we have a regular BP with no
* allocated DVAs, and the corresponding DDT entry for its checksum.
* Our goal is to fill the BP with enough DVAs to satisfy its copies=
* requirement.
*
* One of three things needs to happen to fulfill this:
*
* - if the DDT entry has enough DVAs to satisfy the BP, we just copy
* them out of the entry and return;
*
* - if the DDT entry has no DVAs (i.e. it's brand new), then we have to
* issue the write as normal so that DVAs can be allocated and the
* data land on disk. We then copy the DVAs into the DDT entry on
* return.
*
* - if the DDT entry has some DVAs, but too few, we have to issue the
* write, adjusted to allocate fewer copies. When it returns, we
* add the new DVAs to the DDT entry, and update the BP to have the
* full amount it originally requested.
*
* In all cases, if there's already a writing IO in flight, we need to
* defer the action until after the write is done. If our action is to
* write, we need to adjust our request for additional DVAs to match
* what will be in the DDT entry after it completes. In this way every
* IO can be guaranteed to receive enough DVAs simply by joining the
* end of the chain and letting the sequence play out.
*/
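/*
 * For example (an illustration of the three cases above, with copies=2 and
 * no other write in flight): if the entry already holds two DVAs we just
 * fill the bp from it and take a reference; if it holds one, we issue a
 * child write with zp_copies=1 and extend the entry when that write is
 * ready; if it holds none, we issue the full two-copy write.
 */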
/*
* Number of DVAs in the DDT entry. If the BP is encrypted we ignore
* the third one as usual, since it is used for the IV and salt.
*/
int have_dvas = ddt_phys_dva_count(ddp, v, BP_IS_ENCRYPTED(bp));
IMPLY(have_dvas == 0, ddt_phys_birth(ddp, v) == 0);
/* Number of DVAs requested by the IO. */
uint8_t need_dvas = zp->zp_copies;
/*
* What we do next depends on whether or not there's IO outstanding that
* will update this entry.
*/
if (dde->dde_io == NULL || dde->dde_io->dde_lead_zio[p] == NULL) {
/*
* No IO outstanding, so we only need to worry about ourselves.
*/
/*
* Override BPs bring their own DVAs and their own problems.
*/
if (zio->io_bp_override) {
/*
* For a brand-new entry, all the work has been done
* for us, and we can just fill it out from the provided
* block and leave.
*/
if (have_dvas == 0) {
ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_extend(ddp, v, bp);
ddt_phys_addref(ddp, v);
ddt_exit(ddt);
return (zio);
}
/*
* If we already have this entry, then we want to treat
* it like a regular write. To do this we just wipe
* the bp's DVAs out and proceed like a regular write.
*
* Even if there are some DVAs in the entry, we still
* have to clear them out. We can't use them to fill
* out the dedup entry, as they are all referenced
* together by a bp already on disk, and will be freed
* as a group.
*/
BP_ZERO_DVAS(bp);
BP_SET_BIRTH(bp, 0, 0);
}
/*
* If there are enough DVAs in the entry to service our request,
* then we can just use them as-is.
*/
if (have_dvas >= need_dvas) {
ddt_bp_fill(ddp, v, bp, txg);
ddt_phys_addref(ddp, v);
ddt_exit(ddt);
return (zio);
}
/*
* Otherwise, we have to issue IO to fill the entry up to the
* amount we need.
*/
need_dvas -= have_dvas;
} else {
/*
* There's a write in-flight. If there are already enough DVAs on
* the entry, then either there were already enough to start
* with, or the in-flight IO is between READY and DONE, and so
* has extended the entry with new DVAs. Either way, we don't
* need to do anything, we can just slot in behind it.
*/
if (zio->io_bp_override) {
/*
* If there's a write out, then we're soon going to
* have our own copies of this block, so clear out the
* override block and treat it as a regular dedup
* write. See comment above.
*/
BP_ZERO_DVAS(bp);
BP_SET_BIRTH(bp, 0, 0);
}
if (have_dvas >= need_dvas) {
/*
* A minor point: there might already be enough
* committed DVAs in the entry to service our request,
* but we don't know which are completed and which are
* allocated but not yet written. In this case, should
* the IO for the new DVAs fail, we will be on the end
* of the IO chain and will also receive an error, even
* though our request could have been serviced.
*
* This is an extremely rare case, as it requires the
* original block to be copied with a request for a
* larger number of DVAs, then copied again requesting
* the same (or already fulfilled) number of DVAs while
* the first request is active, and then that first
* request errors. In return, the logic required to
* catch and handle it is complex. For now, I'm just
* not going to bother with it.
*/
/*
* We always fill the bp here as we may have arrived
* after the in-flight write has passed READY, and so
* missed out.
*/
ddt_bp_fill(ddp, v, bp, txg);
zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
ddt_exit(ddt);
return (zio);
}
/*
* There's not enough in the entry yet, so we need to look at
* the write in-flight and see how many DVAs it will have once
* it completes.
*
* The in-flight write has potentially had its copies request
* reduced (if we're filling out an existing entry), so we need
* to reach in and get the original write to find out what it is
* expecting.
*
* Note that the parent of the lead zio will always have the
* highest zp_copies of any zio in the chain, because ones that
* can be serviced without additional IO are always added to
* the back of the chain.
*/
zio_link_t *zl = NULL;
zio_t *pio =
zio_walk_parents(dde->dde_io->dde_lead_zio[p], &zl);
ASSERT(pio);
uint8_t parent_dvas = pio->io_prop.zp_copies;
if (parent_dvas >= need_dvas) {
zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
ddt_exit(ddt);
return (zio);
}
/*
* Still not enough, so we will need to issue to get the
* shortfall.
*/
need_dvas -= parent_dvas;
}
/*
* We need to write. We will create a new write with the copies
* property adjusted to match the number of DVAs we need to
* grow the DDT entry by to satisfy the request.
*/
zio_prop_t czp = *zp;
czp.zp_copies = need_dvas;
zio_t *cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, &czp,
zio_ddt_child_write_ready, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
/*
* We are the new lead zio, because our parent has the highest
* zp_copies that has been requested for this entry so far.
*/
ddt_alloc_entry_io(dde);
if (dde->dde_io->dde_lead_zio[p] == NULL) {
/*
* First time out, take a copy of the stable entry to revert
* to if there's an error (see zio_ddt_child_write_done())
*/
ddt_phys_copy(&dde->dde_io->dde_orig_phys, dde->dde_phys, v);
} else {
/*
* Make the existing chain our child, because it cannot
* complete until we have.
*/
zio_add_child(cio, dde->dde_io->dde_lead_zio[p]);
}
dde->dde_io->dde_lead_zio[p] = cio;
ddt_exit(ddt);
zio_nowait(cio);
return (zio);
}
static ddt_entry_t *freedde; /* for debugging */
static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde = NULL;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp);
if (dde) {
ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
if (v != DDT_PHYS_NONE)
ddt_phys_decref(dde->dde_phys, v);
}
ddt_exit(ddt);
/*
* When no entry was found, it must have been pruned,
* so we can free it now instead of decrementing the
* refcount in the DDT.
*/
if (!dde) {
BP_SET_DEDUP(bp, 0);
zio->io_pipeline |= ZIO_STAGE_DVA_FREE;
}
return (zio);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
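/*
 * Writes subject to the allocation throttle are queued per allocator on
 * spa_allocs[allocator].spaa_tree. zio_io_to_allocate() pops the first
 * queued zio once a metaslab class reservation can be placed for it, and
 * zio_allocate_dispatch() hands that zio back to the issue taskq.
 */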
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(ZIO_HAS_ALLOCATOR(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
metaslab_class_t *mc;
/* locate an appropriate allocation class */
mc = spa_preferred_class(spa, zio);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(ZIO_HAS_ALLOCATOR(zio));
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
int allocator = zio->io_allocator;
zio->io_metaslab_class = mc;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
nio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
/*
* if not already chosen, locate an appropriate allocation class
*/
mc = zio->io_metaslab_class;
if (mc == NULL) {
mc = spa_preferred_class(spa, zio);
zio->io_metaslab_class = mc;
}
+ ZIOSTAT_BUMP(ziostat_total_allocations);
/*
* Try allocating the block in the usual metaslab class.
* If that's full, allocate it in the normal class.
* If that's full, allocate as a gang block,
* and if all are full, the allocation fails (which shouldn't happen).
*
* Note that we do not fall back on embedded slog (ZIL) space, to
* preserve unfragmented slog space, which is critical for decent
* sync write performance. If a log allocation fails, we will fall
* back to spa_sync() which is abysmal for performance.
*/
ASSERT(ZIO_HAS_ALLOCATOR(zio));
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
/*
* Fall back to the normal class when an alloc class is full
*/
if (error == ENOSPC && mc != spa_normal_class(spa)) {
/*
* When the dedup or special class is spilling into the normal
* class, there can still be significant space available due
* to deferred frees that are in-flight. We track the txg when
* this occurred and back off adding new DDT entries for a few
* txgs to allow the free blocks to be processed.
*/
if ((mc == spa_dedup_class(spa) || (spa_special_has_ddt(spa) &&
mc == spa_special_class(spa))) &&
spa->spa_dedup_class_full_txg != zio->io_txg) {
spa->spa_dedup_class_full_txg = zio->io_txg;
zfs_dbgmsg("%s[%d]: %s class spilling, req size %d, "
"%llu allocated of %llu",
spa_name(spa), (int)zio->io_txg,
mc == spa_dedup_class(spa) ? "dedup" : "special",
(int)zio->io_size,
(u_longlong_t)metaslab_class_get_alloc(mc),
(u_longlong_t)metaslab_class_get_space(mc));
}
/*
* If throttling, transfer reservation over to normal class.
* The io_allocator slot can remain the same even though we
* are switching classes.
*/
if (mc->mc_alloc_throttle_enabled &&
(zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
metaslab_class_throttle_unreserve(mc,
zio->io_prop.zp_copies, zio->io_allocator, zio);
zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
VERIFY(metaslab_class_throttle_reserve(
spa_normal_class(spa),
zio->io_prop.zp_copies, zio->io_allocator, zio,
flags | METASLAB_MUST_RESERVE));
}
zio->io_metaslab_class = mc = spa_normal_class(spa);
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
+ ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
}
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
+ ZIOSTAT_BUMP(ziostat_gang_writes);
+ if (flags & METASLAB_GANG_CHILD)
+ ZIOSTAT_BUMP(ziostat_gang_multilevel);
return (zio_write_gang_block(zio, mc));
}
if (error != 0) {
if (error != ENOSPC ||
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
zio->io_error = error;
}
return (zio);
}
static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (zio);
}
static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (zio);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp)) {
metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
B_TRUE);
}
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
/*
* Block pointer fields are useful to metaslabs for stats and debugging.
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_PSIZE(new_bp, size);
BP_SET_LEVEL(new_bp, 0);
/*
* When allocating a zil block, we don't have information about
* the final destination of the block except the objset it's part
* of, so we just hash the objset ID to pick the allocator to get
* some parallelism.
*/
int flags = METASLAB_ZIL;
int allocator = (uint_t)cityhash1(os->os_dsl_dataset->ds_object)
% spa->spa_alloc_count;
+ ZIOSTAT_BUMP(ziostat_total_allocations);
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
if (error != 0) {
error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
if (error != 0) {
+ ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* encrypted blocks will require an IV and salt. We generate
* these now since we will not be rewriting the bp at
* rewrite time.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), (u_longlong_t)size,
error);
}
return (error);
}
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different than the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
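/*
 * This is why several paths below return NULL and arrange for
 * zio_interrupt() (or zio_delay_interrupt()) to be called instead, e.g.
 * when the vdev is inaccessible or an injected "no-op" error is handled.
 */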
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(spa->spa_trust_config);
/*
* Note: the code can handle other kinds of writes,
* but we don't expect them.
*/
if (zio->io_vd->vdev_noalloc) {
ASSERT(zio->io_flags &
(ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
}
}
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering.
*
* There are a few ways that we can end up creating these spurious
* resilver i/os:
*
* 1. A resilver i/o will be issued if any DVA in the BP has a
* dirty DTL. The mirror code will issue resilver writes to
* each DVA, including the one(s) that are not on vdevs with dirty
* DTLs.
*
* 2. With nested replication, which happens when we have a
* "replacing" or "spare" vdev that's a child of a mirror or raidz.
* For example, given mirror(replacing(A+B), C), it's likely that
* only A is out of date (it's the new device). In this case, we'll
* read from C, then use the data to resilver A+B -- but we don't
* actually want to resilver B, just A. The top-level mirror has no
* way to know this, so instead we just discard unnecessary repairs
* as we work our way down the vdev tree.
*
* 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
* The same logic applies to any form of nested replication: ditto
* + mirror, RAID-Z + replacing, etc.
*
* However, indirect vdevs point off to other vdevs which may have
* DTL's, so we never bypass them. The child i/os on concrete vdevs
* will be properly bypassed instead.
*
* Leaf DTL_PARTIAL can be empty when a legitimate write comes from
* a dRAID spare vdev. For example, when a dRAID spare is first
* used, its spare blocks need to be written to, but the leaf vdevs
* of such blocks can have an empty DTL_PARTIAL.
*
* There seemed no clean way to allow such writes while bypassing
* spurious ones. At this point, just avoid all bypassing for dRAID
* for correctness.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
vd->vdev_ops != &vdev_indirect_ops &&
vd->vdev_top->vdev_ops != &vdev_draid_ops &&
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (zio);
}
/*
* Select the next best leaf I/O to process. Distributed spares are
* excluded since they dispatch the I/O directly to a leaf vdev after
* applying the dRAID mapping.
*/
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops &&
(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM)) {
- if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
- /*
- * "no-op" injections return success, but do no actual
- * work. Just skip the remaining vdev stages.
- */
- zio_vdev_io_bypass(zio);
- zio_interrupt(zio);
- return (NULL);
- }
-
if ((zio = vdev_queue_io(zio)) == NULL)
return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (NULL);
}
zio->io_delay = gethrtime();
+
+ if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
+ /*
+ * "no-op" injections return success, but do no actual
+ * work. Just complete the zio via zio_delay_interrupt().
+ */
+ zio_delay_interrupt(zio);
+ return (NULL);
+ }
}
vd->vdev_ops->vdev_op_io_start(zio);
return (NULL);
}
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_FLUSH ||
zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
if (zio->io_type != ZIO_TYPE_FLUSH)
vdev_queue_io_done(zio);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
VERIFY(vdev_probe(vd, zio) == NULL);
return (zio);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
/*
* If a Direct I/O operation has a checksum verify error then this I/O
* should not attempt to be issued again.
*/
if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) {
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL);
ASSERT3U(zio->io_error, ==, EIO);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (NULL);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
"cant_write=TRUE due to write failure with ENXIO",
zio);
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP we know that no future
* attempts will ever succeed. In this case we set a persistent
* boolean flag so that we don't bother with it in the future, and
* then we act like the flush succeeded.
*/
if (zio->io_error == ENOTSUP && zio->io_type == ZIO_TYPE_FLUSH &&
vd != NULL) {
vd->vdev_nowritecache = B_TRUE;
zio->io_error = 0;
}
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Objset blocks are a special case since they have 2 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (zio);
}
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (zio);
}
static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
}
ASSERT0(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
IMPLY(zio->io_flags & ZIO_FLAG_DIO_READ,
!(zio->io_flags & ZIO_FLAG_SPECULATIVE));
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
if (zio->io_flags & ZIO_FLAG_DIO_READ) {
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
zio_t *pio = zio_unique_parent(zio);
/*
* Any Direct I/O read that has a checksum
* error must be treated as suspicious, as the
* contents of the buffer could be getting
* manipulated while the I/O is taking place.
*
* The checksum verify error will only be
* reported here for disk and file VDEVs, and only on
* the VDEV where the failure occurred. Other VDEV
* types report the verify failure in their own code
* paths.
*/
if (pio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_dio_chksum_verify_error_report(zio);
}
} else {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, &info);
}
}
}
return (zio);
}
static zio_t *
zio_dio_checksum_verify(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
int error;
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3B(pio->io_prop.zp_direct_write, ==, B_TRUE);
ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
if (zfs_vdev_direct_write_verify == 0 || zio->io_error != 0)
goto out;
if ((error = zio_checksum_error(zio, NULL)) != 0) {
zio->io_error = error;
if (error == ECKSUM) {
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
zio_dio_chksum_verify_error_report(zio);
}
}
out:
return (zio);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* Report Direct I/O checksum verify error and create ZED event.
*/
void
zio_dio_chksum_verify_error_report(zio_t *zio)
{
ASSERT(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
return;
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_dio_verify_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_WRITE) {
/*
* Convert checksum error for writes into EIO.
*/
zio->io_error = SET_ERROR(EIO);
/*
* Report dio_verify_wr ZED event.
*/
(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_WR,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
} else {
/*
* Report dio_verify_rd ZED event.
*/
(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_RD,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
}
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
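/*
 * A minimal usage sketch of the ranking above (illustrative standalone calls,
 * not taken from the pipeline):
 *
 *	zio_worst_error(0, ENXIO) == ENXIO	(any error beats success)
 *	zio_worst_error(ENXIO, ECKSUM) == ECKSUM (per-I/O errors rank worse
 *						 than whole-device errors)
 *	zio_worst_error(EIO, EINVAL) == EINVAL	(unranked errors are presumed
 *						 worst of all)
 */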
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
#ifdef ZFS_DEBUG
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
#endif
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(ZIO_HAS_ALLOCATOR(zio));
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
zio->io_metaslab_class, zio->io_prop.zp_copies,
zio->io_allocator, zio);
zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (bp != NULL && BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (zio);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
zio_t *lio __maybe_unused = zio->io_logical;
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT(ZIO_HAS_ALLOCATOR(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
static zio_t *
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (NULL);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
ASSERT(ZIO_HAS_ALLOCATOR(zio));
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (adata != NULL && asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (adata != NULL && asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, exceeding
* 30 seconds to complete, post an error describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
/*
* We want to only increment our slow IO counters if
* the IO is valid (i.e. not if the drive is removed).
*
* zfs_ereport_post() will also do these checks, but
* it can also ratelimit and have other failures, so we
* need to increment the slow_io counters independent
* of it.
*/
if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, zio)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_slow_ios++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, &zio->io_bookmark,
zio, 0);
}
}
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd) &&
!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
if (ret != EALREADY) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_READ)
zio->io_vd->vdev_stat.vs_read_errors++;
else if (zio->io_type == ZIO_TYPE_WRITE)
zio->io_vd->vdev_stat.vs_write_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark,
BP_GET_LOGICAL_BIRTH(zio->io_bp));
(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL) &&
!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* A Direct I/O operation that has a checksum verify error
* should not attempt to reexecute. Instead, the error should
* just be propagated back.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR));
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
/*
* This is a rare code path, so we don't
* bother with "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
NULL);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
/*
* This is a rare code path, so we don't bother with
* "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
spa_taskq_dispatch(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
zio_reexecute, zio, B_FALSE);
}
return (NULL);
}
ASSERT(list_is_empty(&zio->io_child_list));
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* We are done executing this zio. We may want to execute a parent
* next. See the comment in zio_notify_parent().
*/
zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (next_to_execute);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_brt_free,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_dio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (any value larger than a level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level but different blkids, and the block sizes are not the same.
* There is presently no way to change the indirect block sizes.
*/
return (0);
}
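/*
 * A worked example of the canonicalization above, assuming the common
 * defaults of 16K meta-dnode data blocks (dbss = 32 sectors) and 512-byte
 * dnodes, so each dnode block holds 32 dnodes:
 *
 *	meta-dnode bookmark (object 0, level 0, blkid 2)
 *	    -> zb1obj = 2 * 32 = 64, zb1L0 = 0,
 *	       zb1level = 0 + COMPARE_META_LEVEL
 *
 * so it compares after bookmarks in lower-numbered objects and before every
 * bookmark inside objects 64..95, i.e. a dnode block is reached before the
 * objects it describes.
 */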
/*
* This function answers the following question: given that last_block is the
* place our traversal stopped last time, does that guarantee that we have
* visited every node under subtree_root? We cannot answer this with the raw
* output of zbookmark_compare alone. Instead, we pass in a modified version of
* subtree_root: by incrementing its block id and then checking whether
* last_block is at or past that point, we can tell whether having visited
* last_block implies that all of subtree_root's children have been visited.
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT0(last_block->zb_level);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you make changes to how the zbookmark_compare code works, make sure
* that this code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
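/*
 * A hedged usage sketch (hypothetical caller and bookmark names): a resumable
 * traversal that previously stopped at last_block can decide whether an
 * entire indirect block's subtree was already covered:
 *
 *	if (zbookmark_subtree_completed(dnp, &indirect_zb, &last_block))
 *		return;		(everything under indirect_zb was visited)
 *
 * Because mod_zb is subtree_root with its blkid incremented, the test reduces
 * to asking whether that next-sibling bookmark is reached no later than
 * last_block in the pre-order traversal.
 */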
/*
* This function is similar to zbookmark_subtree_completed(), but returns true
* if subtree_root is equal to or ahead of last_block, i.e. still to be done.
*/
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
ASSERT0(last_block->zb_level);
if (dnp == NULL)
return (B_FALSE);
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
last_block) >= 0);
}
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
"Max I/O completion time (milliseconds) before marking it as slow");
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
"Prioritize requeued I/O");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
"Defer frees starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
"Don't compress starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
"Rewrite new bps starting in this pass");
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
"Throttle block allocations in the ZIO pipeline");
ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
"Log all slow ZIOs, not just those with vdevs");
diff --git a/sys/contrib/openzfs/tests/runfiles/freebsd.run b/sys/contrib/openzfs/tests/runfiles/freebsd.run
index e1ae0c6b7721..943c8eab2715 100644
--- a/sys/contrib/openzfs/tests/runfiles/freebsd.run
+++ b/sys/contrib/openzfs/tests/runfiles/freebsd.run
@@ -1,36 +1,36 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/cli_root/zfs_jail:FreeBSD]
tests = ['zfs_jail_001_pos']
tags = ['functional', 'cli_root', 'zfs_jail']
[tests/functional/pam:FreeBSD]
-tests = ['pam_basic', 'pam_change_unmounted', 'pam_nounmount', 'pam_recursive',
- 'pam_short_password']
+tests = ['pam_basic', 'pam_change_unmounted', 'pam_mount_recursively',
+ 'pam_nounmount', 'pam_recursive', 'pam_short_password']
tags = ['functional', 'pam']
[tests/functional/direct:FreeBSD]
tests = ['dio_write_stable_pages']
tags = ['functional', 'direct']
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index e55ec583d2cc..275772f2820e 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -1,239 +1,239 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/posix:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/acl/posix-sa:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix-sa']
[tests/functional/atime:Linux]
tests = ['atime_003_pos', 'root_relatime_on']
tags = ['functional', 'atime']
[tests/functional/block_cloning:Linux]
tests = ['block_cloning_ficlone', 'block_cloning_ficlonerange',
'block_cloning_ficlonerange_partial', 'block_cloning_disabled_ficlone',
'block_cloning_disabled_ficlonerange']
tags = ['functional', 'block_cloning']
[tests/functional/chattr:Linux]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/cli_root/zfs:Linux]
tests = ['zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_mount:Linux]
tests = ['zfs_mount_006_pos', 'zfs_mount_008_pos', 'zfs_mount_013_pos',
'zfs_mount_014_neg', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_share:Linux]
tests = ['zfs_share_005_pos', 'zfs_share_007_neg', 'zfs_share_009_neg',
'zfs_share_012_pos', 'zfs_share_013_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_unshare:Linux]
tests = ['zfs_unshare_008_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_sysfs:Linux]
tests = ['zfeature_set_unsupported', 'zfs_get_unsupported',
'zfs_set_unsupported', 'zfs_sysfs_live', 'zpool_get_unsupported',
'zpool_set_unsupported']
tags = ['functional', 'cli_root', 'zfs_sysfs']
[tests/functional/cli_root/zpool_add:Linux]
tests = ['add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_expand:Linux]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_import:Linux]
tests = ['zpool_import_hostid_changed',
'zpool_import_hostid_changed_unclean_export',
'zpool_import_hostid_changed_cachefile',
'zpool_import_hostid_changed_cachefile_unclean_export']
tags = ['functional', 'cli_root', 'zpool_import']
[tests/functional/cli_root/zpool_reopen:Linux]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg', 'zpool_reopen_007_pos']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_split:Linux]
tests = ['zpool_split_wholedisk']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/compression:Linux]
tests = ['compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/devices:Linux]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/direct:Linux]
-tests = ['dio_write_verify']
+tests = ['dio_loopback_dev', 'dio_write_verify']
tags = ['functional', 'direct']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill',
'zed_cksum_reported', 'zed_cksum_config', 'zed_io_config',
'zed_slow_io', 'zed_slow_io_many_vdevs', 'zed_diagnose_multiple']
tags = ['functional', 'events']
[tests/functional/fadvise:Linux]
tests = ['fadvise_sequential']
tags = ['functional', 'fadvise']
[tests/functional/fallocate:Linux]
tests = ['fallocate_prealloc', 'fallocate_zero-range']
tags = ['functional', 'fallocate']
[tests/functional/fault:Linux]
tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_replace_002_pos', 'auto_spare_001_pos',
'auto_spare_002_pos', 'auto_spare_multiple', 'auto_spare_ashift',
'auto_spare_shared', 'decrypt_fault', 'decompress_fault',
'fault_limits', 'scrub_after_resilver', 'suspend_on_probe_errors',
'suspend_resume_single', 'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]
tests = ['large_dnode_002_pos', 'large_dnode_006_pos', 'large_dnode_008_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/io:Linux]
tests = ['libaio', 'io_uring']
tags = ['functional', 'io']
[tests/functional/largest_pool:Linux]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/longname:Linux]
tests = ['longname_001_pos', 'longname_002_pos', 'longname_003_pos']
tags = ['functional', 'longname']
[tests/functional/luks:Linux]
pre =
post =
tests = ['luks_sanity']
tags = ['functional', 'luks']
[tests/functional/mmap:Linux]
tests = ['mmap_libaio_001_pos', 'mmap_sync_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp:Linux]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history',
'mmp_on_zdb', 'mmp_write_distribution', 'mmp_hostid', 'mmp_write_slow_disk']
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
tests = ['umount_unlinked_drain']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
-tests = ['pam_basic', 'pam_change_unmounted', 'pam_nounmount', 'pam_recursive',
- 'pam_short_password']
+tests = ['pam_basic', 'pam_change_unmounted', 'pam_mount_recursively',
+ 'pam_nounmount', 'pam_recursive', 'pam_short_password']
tags = ['functional', 'pam']
[tests/functional/procfs:Linux]
tests = ['procfs_list_basic', 'procfs_list_concurrent_readers',
'procfs_list_stale_read', 'pool_state']
tags = ['functional', 'procfs']
[tests/functional/projectquota:Linux]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg']
tags = ['functional', 'projectquota']
[tests/functional/dos_attributes:Linux]
tests = ['read_dos_attrs_001', 'write_dos_attrs_001']
tags = ['functional', 'dos_attributes']
[tests/functional/renameat2:Linux]
tests = ['renameat2_noreplace', 'renameat2_exchange', 'renameat2_whiteout']
tags = ['functional', 'renameat2']
[tests/functional/rsend:Linux]
tests = ['send_realloc_dnode_size', 'send_encrypted_files', 'send-c_longname']
tags = ['functional', 'rsend']
[tests/functional/simd:Linux]
pre =
post =
tests = ['simd_supported']
tags = ['functional', 'simd']
[tests/functional/snapshot:Linux]
tests = ['snapshot_015_pos', 'snapshot_016_pos']
tags = ['functional', 'snapshot']
[tests/functional/tmpfile:Linux]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos',
'tmpfile_stat_mode']
tags = ['functional', 'tmpfile']
[tests/functional/upgrade:Linux]
tests = ['upgrade_projectquota_001_pos', 'upgrade_projectquota_002_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace:Linux]
tests = ['user_namespace_001', 'user_namespace_002', 'user_namespace_003',
'user_namespace_004']
tags = ['functional', 'user_namespace']
[tests/functional/userquota:Linux]
tests = ['groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos',
'userquota_013_pos', 'userspace_003_pos']
tags = ['functional', 'userquota']
[tests/functional/zvol/zvol_misc:Linux]
tests = ['zvol_misc_fua']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/idmap_mount:Linux]
tests = ['idmap_mount_001', 'idmap_mount_002', 'idmap_mount_003',
'idmap_mount_004', 'idmap_mount_005']
tags = ['functional', 'idmap_mount']
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index d0eb4c30db48..dcefb26a4036 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -1,2185 +1,2187 @@
CLEANFILES =
dist_noinst_DATA =
include $(top_srcdir)/config/Substfiles.am
datadir_zfs_tests_testsdir = $(datadir)/$(PACKAGE)/zfs-tests/tests
nobase_dist_datadir_zfs_tests_tests_DATA = \
perf/nfs-sample.cfg \
perf/perf.shlib \
\
perf/fio/mkfiles.fio \
perf/fio/random_reads.fio \
perf/fio/random_readwrite.fio \
perf/fio/random_readwrite_fixed.fio \
perf/fio/random_writes.fio \
perf/fio/sequential_reads.fio \
perf/fio/sequential_readwrite.fio \
perf/fio/sequential_writes.fio
nobase_dist_datadir_zfs_tests_tests_SCRIPTS = \
perf/regression/random_reads.ksh \
perf/regression/random_readwrite.ksh \
perf/regression/random_readwrite_fixed.ksh \
perf/regression/random_writes.ksh \
perf/regression/random_writes_zil.ksh \
perf/regression/sequential_reads_arc_cached_clone.ksh \
perf/regression/sequential_reads_arc_cached.ksh \
perf/regression/sequential_reads_dbuf_cached.ksh \
perf/regression/sequential_reads.ksh \
perf/regression/sequential_writes.ksh \
perf/regression/setup.ksh \
\
perf/scripts/prefetch_io.sh
# These lists can be regenerated by running make regen-tests at the root, or, on a *clean* source:
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
#
# simd and tmpfile are Linux-only and not installed elsewhere
#
# C programs are specced in ../Makefile.am above as part of the main Makefile
find_common := find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po'
regen:
@$(MAKE) -C $(top_builddir) clean
@$(MAKE) clean
$(SED) $(ac_inplace) '/^# -- >8 --/q' Makefile.am
echo >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_DATA = \' >> Makefile.am
$(find_common) ! -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \' >> Makefile.am
$(find_common) -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'SUBSTFILES += $$(nobase_nodist_datadir_zfs_tests_tests_DATA) $$(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)' >> Makefile.am
echo >> Makefile.am
echo 'if BUILD_LINUX' >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'endif' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_DATA += \' >> Makefile.am
$(find_common) ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
# -- >8 --
nobase_nodist_datadir_zfs_tests_tests_DATA = \
functional/pam/utilities.kshlib
nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \
functional/pyzfs/pyzfs_unittest.ksh
SUBSTFILES += $(nobase_nodist_datadir_zfs_tests_tests_DATA) $(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)
if BUILD_LINUX
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/simd/simd_supported.ksh \
functional/tmpfile/cleanup.ksh \
functional/tmpfile/setup.ksh \
functional/luks/luks_sanity.ksh
endif
nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/acl/acl.cfg \
functional/acl/acl_common.kshlib \
functional/alloc_class/alloc_class.cfg \
functional/alloc_class/alloc_class.kshlib \
functional/atime/atime.cfg \
functional/atime/atime_common.kshlib \
functional/bclone/bclone.cfg \
functional/bclone/bclone_common.kshlib \
functional/bclone/bclone_corner_cases.kshlib \
functional/block_cloning/block_cloning.kshlib \
functional/cache/cache.cfg \
functional/cache/cache.kshlib \
functional/cachefile/cachefile.cfg \
functional/cachefile/cachefile.kshlib \
functional/casenorm/casenorm.cfg \
functional/casenorm/casenorm.kshlib \
functional/channel_program/channel_common.kshlib \
functional/channel_program/lua_core/tst.args_to_lua.out \
functional/channel_program/lua_core/tst.args_to_lua.zcp \
functional/channel_program/lua_core/tst.divide_by_zero.err \
functional/channel_program/lua_core/tst.divide_by_zero.zcp \
functional/channel_program/lua_core/tst.exists.zcp \
functional/channel_program/lua_core/tst.large_prog.out \
functional/channel_program/lua_core/tst.large_prog.zcp \
functional/channel_program/lua_core/tst.lib_base.lua \
functional/channel_program/lua_core/tst.lib_coroutine.lua \
functional/channel_program/lua_core/tst.lib_strings.lua \
functional/channel_program/lua_core/tst.lib_table.lua \
functional/channel_program/lua_core/tst.nested_neg.zcp \
functional/channel_program/lua_core/tst.nested_pos.zcp \
functional/channel_program/lua_core/tst.recursive.zcp \
functional/channel_program/lua_core/tst.return_large.zcp \
functional/channel_program/lua_core/tst.return_recursive_table.zcp \
functional/channel_program/lua_core/tst.stack_gsub.err \
functional/channel_program/lua_core/tst.stack_gsub.zcp \
functional/channel_program/lua_core/tst.timeout.zcp \
functional/channel_program/synctask_core/tst.bookmark.copy.zcp \
functional/channel_program/synctask_core/tst.bookmark.create.zcp \
functional/channel_program/synctask_core/tst.get_index_props.out \
functional/channel_program/synctask_core/tst.get_index_props.zcp \
functional/channel_program/synctask_core/tst.get_number_props.out \
functional/channel_program/synctask_core/tst.get_number_props.zcp \
functional/channel_program/synctask_core/tst.get_string_props.out \
functional/channel_program/synctask_core/tst.get_string_props.zcp \
functional/channel_program/synctask_core/tst.promote_conflict.zcp \
functional/channel_program/synctask_core/tst.set_props.zcp \
functional/channel_program/synctask_core/tst.snapshot_destroy.zcp \
functional/channel_program/synctask_core/tst.snapshot_neg.zcp \
functional/channel_program/synctask_core/tst.snapshot_recursive.zcp \
functional/channel_program/synctask_core/tst.snapshot_rename.zcp \
functional/channel_program/synctask_core/tst.snapshot_simple.zcp \
functional/checksum/default.cfg \
functional/clean_mirror/clean_mirror_common.kshlib \
functional/clean_mirror/default.cfg \
functional/cli_root/cli_common.kshlib \
functional/cli_root/zfs_copies/zfs_copies.cfg \
functional/cli_root/zfs_copies/zfs_copies.kshlib \
functional/cli_root/zfs_create/properties.kshlib \
functional/cli_root/zfs_create/zfs_create.cfg \
functional/cli_root/zfs_create/zfs_create_common.kshlib \
functional/cli_root/zfs_destroy/zfs_destroy.cfg \
functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib \
functional/cli_root/zfs_get/zfs_get_common.kshlib \
functional/cli_root/zfs_get/zfs_get_list_d.kshlib \
functional/cli_root/zfs_jail/jail.conf \
functional/cli_root/zfs_load-key/HEXKEY \
functional/cli_root/zfs_load-key/PASSPHRASE \
functional/cli_root/zfs_load-key/RAWKEY \
functional/cli_root/zfs_load-key/zfs_load-key.cfg \
functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib \
functional/cli_root/zfs_mount/zfs_mount.cfg \
functional/cli_root/zfs_mount/zfs_mount.kshlib \
functional/cli_root/zfs_promote/zfs_promote.cfg \
functional/cli_root/zfs_receive/zstd_test_data.txt \
functional/cli_root/zfs_rename/zfs_rename.cfg \
functional/cli_root/zfs_rename/zfs_rename.kshlib \
functional/cli_root/zfs_rollback/zfs_rollback.cfg \
functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib \
functional/cli_root/zfs_send/zfs_send.cfg \
functional/cli_root/zfs_set/zfs_set_common.kshlib \
functional/cli_root/zfs_share/zfs_share.cfg \
functional/cli_root/zfs_snapshot/zfs_snapshot.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.kshlib \
functional/cli_root/zfs_upgrade/zfs_upgrade.kshlib \
functional/cli_root/zfs_wait/zfs_wait.kshlib \
functional/cli_root/zpool_add/zpool_add.cfg \
functional/cli_root/zpool_add/zpool_add.kshlib \
functional/cli_root/zpool_clear/zpool_clear.cfg \
functional/cli_root/zpool_create/draidcfg.gz \
functional/cli_root/zpool_create/zpool_create.cfg \
functional/cli_root/zpool_create/zpool_create.shlib \
functional/cli_root/zpool_destroy/zpool_destroy.cfg \
functional/cli_root/zpool_events/zpool_events.cfg \
functional/cli_root/zpool_events/zpool_events.kshlib \
functional/cli_root/zpool_expand/zpool_expand.cfg \
functional/cli_root/zpool_export/zpool_export.cfg \
functional/cli_root/zpool_export/zpool_export.kshlib \
functional/cli_root/zpool_get/vdev_get.cfg \
functional/cli_root/zpool_get/zpool_get.cfg \
functional/cli_root/zpool_get/zpool_get_parsable.cfg \
functional/cli_root/zpool_import/blockfiles/cryptv0.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/missing_ivset.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.shlib \
functional/cli_root/zpool_resilver/zpool_resilver.cfg \
functional/cli_root/zpool_scrub/zpool_scrub.cfg \
functional/cli_root/zpool_split/zpool_split.cfg \
functional/cli_root/zpool_trim/zpool_trim.kshlib \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v10.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v11.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v12.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v13.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v14.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v15.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz21.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz22.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz23.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v4.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v5.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v6.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v7.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v8.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v999.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v9.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-vBROKEN.dat.bz2 \
functional/cli_root/zpool_upgrade/zpool_upgrade.cfg \
functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib \
functional/cli_root/zpool_wait/zpool_wait.kshlib \
functional/cli_root/zhack/library.kshlib \
functional/cli_user/misc/misc.cfg \
functional/cli_user/zfs_list/zfs_list.cfg \
functional/cli_user/zfs_list/zfs_list.kshlib \
functional/compression/compress.cfg \
functional/compression/testpool_zstd.tar.gz \
functional/deadman/deadman.cfg \
functional/delegate/delegate.cfg \
functional/delegate/delegate_common.kshlib \
functional/devices/devices.cfg \
functional/devices/devices_common.kshlib \
functional/direct/dio.cfg \
functional/direct/dio.kshlib \
functional/events/events.cfg \
functional/events/events_common.kshlib \
functional/fault/fault.cfg \
functional/grow/grow.cfg \
functional/history/history.cfg \
functional/history/history_common.kshlib \
functional/history/i386.migratedpool.DAT.Z \
functional/history/i386.orig_history.txt \
functional/history/sparc.migratedpool.DAT.Z \
functional/history/sparc.orig_history.txt \
functional/history/zfs-pool-v4.dat.Z \
functional/inheritance/config001.cfg \
functional/inheritance/config002.cfg \
functional/inheritance/config003.cfg \
functional/inheritance/config004.cfg \
functional/inheritance/config005.cfg \
functional/inheritance/config006.cfg \
functional/inheritance/config007.cfg \
functional/inheritance/config008.cfg \
functional/inheritance/config009.cfg \
functional/inheritance/config010.cfg \
functional/inheritance/config011.cfg \
functional/inheritance/config012.cfg \
functional/inheritance/config013.cfg \
functional/inheritance/config014.cfg \
functional/inheritance/config015.cfg \
functional/inheritance/config016.cfg \
functional/inheritance/config017.cfg \
functional/inheritance/config018.cfg \
functional/inheritance/config019.cfg \
functional/inheritance/config020.cfg \
functional/inheritance/config021.cfg \
functional/inheritance/config022.cfg \
functional/inheritance/config023.cfg \
functional/inheritance/config024.cfg \
functional/inheritance/inherit.kshlib \
functional/inheritance/README.config \
functional/inheritance/README.state \
functional/inheritance/state001.cfg \
functional/inheritance/state002.cfg \
functional/inheritance/state003.cfg \
functional/inheritance/state004.cfg \
functional/inheritance/state005.cfg \
functional/inheritance/state006.cfg \
functional/inheritance/state007.cfg \
functional/inheritance/state008.cfg \
functional/inheritance/state009.cfg \
functional/inheritance/state010.cfg \
functional/inheritance/state011.cfg \
functional/inheritance/state012.cfg \
functional/inheritance/state013.cfg \
functional/inheritance/state014.cfg \
functional/inheritance/state015.cfg \
functional/inheritance/state016.cfg \
functional/inheritance/state017.cfg \
functional/inheritance/state018.cfg \
functional/inheritance/state019.cfg \
functional/inheritance/state020.cfg \
functional/inheritance/state021.cfg \
functional/inheritance/state022.cfg \
functional/inheritance/state023.cfg \
functional/inheritance/state024.cfg \
functional/inuse/inuse.cfg \
functional/io/io.cfg \
functional/l2arc/l2arc.cfg \
functional/largest_pool/largest_pool.cfg \
functional/migration/migration.cfg \
functional/migration/migration.kshlib \
functional/mmap/mmap.cfg \
functional/mmp/mmp.cfg \
functional/mmp/mmp.kshlib \
functional/mv_files/mv_files.cfg \
functional/mv_files/mv_files_common.kshlib \
functional/nopwrite/nopwrite.shlib \
functional/no_space/enospc.cfg \
functional/online_offline/online_offline.cfg \
functional/pool_checkpoint/pool_checkpoint.kshlib \
functional/projectquota/projectquota.cfg \
functional/projectquota/projectquota_common.kshlib \
functional/quota/quota.cfg \
functional/quota/quota.kshlib \
functional/redacted_send/redacted.cfg \
functional/redacted_send/redacted.kshlib \
functional/redundancy/redundancy.cfg \
functional/redundancy/redundancy.kshlib \
functional/refreserv/refreserv.cfg \
functional/removal/removal.kshlib \
functional/replacement/replacement.cfg \
functional/reservation/reservation.cfg \
functional/reservation/reservation.shlib \
functional/rsend/dedup_encrypted_zvol.bz2 \
functional/rsend/dedup_encrypted_zvol.zsend.bz2 \
functional/rsend/dedup.zsend.bz2 \
functional/rsend/fs.tar.gz \
functional/rsend/rsend.cfg \
functional/rsend/rsend.kshlib \
functional/scrub_mirror/default.cfg \
functional/scrub_mirror/scrub_mirror_common.kshlib \
functional/slog/slog.cfg \
functional/slog/slog.kshlib \
functional/snapshot/snapshot.cfg \
functional/snapused/snapused.kshlib \
functional/sparse/sparse.cfg \
functional/trim/trim.cfg \
functional/trim/trim.kshlib \
functional/truncate/truncate.cfg \
functional/upgrade/upgrade_common.kshlib \
functional/user_namespace/user_namespace.cfg \
functional/user_namespace/user_namespace_common.kshlib \
functional/userquota/13709_reproducer.bz2 \
functional/userquota/userquota.cfg \
functional/userquota/userquota_common.kshlib \
functional/vdev_zaps/vdev_zaps.kshlib \
functional/xattr/xattr.cfg \
functional/xattr/xattr_common.kshlib \
functional/zvol/zvol.cfg \
functional/zvol/zvol_cli/zvol_cli.cfg \
functional/zvol/zvol_common.shlib \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC.cfg \
functional/zvol/zvol_misc/zvol_misc_common.kshlib \
functional/zvol/zvol_swap/zvol_swap.cfg \
functional/idmap_mount/idmap_mount.cfg \
functional/idmap_mount/idmap_mount_common.kshlib
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/acl/off/cleanup.ksh \
functional/acl/off/dosmode.ksh \
functional/acl/off/posixmode.ksh \
functional/acl/off/setup.ksh \
functional/acl/posix/cleanup.ksh \
functional/acl/posix/posix_001_pos.ksh \
functional/acl/posix/posix_002_pos.ksh \
functional/acl/posix/posix_003_pos.ksh \
functional/acl/posix/posix_004_pos.ksh \
functional/acl/posix-sa/cleanup.ksh \
functional/acl/posix-sa/posix_001_pos.ksh \
functional/acl/posix-sa/posix_002_pos.ksh \
functional/acl/posix-sa/posix_003_pos.ksh \
functional/acl/posix-sa/posix_004_pos.ksh \
functional/acl/posix-sa/setup.ksh \
functional/acl/posix/setup.ksh \
functional/alloc_class/alloc_class_001_pos.ksh \
functional/alloc_class/alloc_class_002_neg.ksh \
functional/alloc_class/alloc_class_003_pos.ksh \
functional/alloc_class/alloc_class_004_pos.ksh \
functional/alloc_class/alloc_class_005_pos.ksh \
functional/alloc_class/alloc_class_006_pos.ksh \
functional/alloc_class/alloc_class_007_pos.ksh \
functional/alloc_class/alloc_class_008_pos.ksh \
functional/alloc_class/alloc_class_009_pos.ksh \
functional/alloc_class/alloc_class_010_pos.ksh \
functional/alloc_class/alloc_class_011_neg.ksh \
functional/alloc_class/alloc_class_012_pos.ksh \
functional/alloc_class/alloc_class_013_pos.ksh \
functional/alloc_class/alloc_class_014_neg.ksh \
functional/alloc_class/alloc_class_015_pos.ksh \
functional/alloc_class/cleanup.ksh \
functional/alloc_class/setup.ksh \
functional/append/file_append.ksh \
functional/append/threadsappend_001_pos.ksh \
functional/append/cleanup.ksh \
functional/append/setup.ksh \
functional/arc/arcstats_runtime_tuning.ksh \
functional/arc/cleanup.ksh \
functional/arc/dbufstats_001_pos.ksh \
functional/arc/dbufstats_002_pos.ksh \
functional/arc/dbufstats_003_pos.ksh \
functional/arc/setup.ksh \
functional/atime/atime_001_pos.ksh \
functional/atime/atime_002_neg.ksh \
functional/atime/atime_003_pos.ksh \
functional/atime/cleanup.ksh \
functional/atime/root_atime_off.ksh \
functional/atime/root_atime_on.ksh \
functional/atime/root_relatime_on.ksh \
functional/atime/setup.ksh \
functional/bclone/bclone_crossfs_corner_cases.ksh \
functional/bclone/bclone_crossfs_corner_cases_limited.ksh \
functional/bclone/bclone_crossfs_data.ksh \
functional/bclone/bclone_crossfs_embedded.ksh \
functional/bclone/bclone_crossfs_hole.ksh \
functional/bclone/bclone_diffprops_all.ksh \
functional/bclone/bclone_diffprops_checksum.ksh \
functional/bclone/bclone_diffprops_compress.ksh \
functional/bclone/bclone_diffprops_copies.ksh \
functional/bclone/bclone_diffprops_recordsize.ksh \
functional/bclone/bclone_prop_sync.ksh \
functional/bclone/bclone_samefs_corner_cases.ksh \
functional/bclone/bclone_samefs_corner_cases_limited.ksh \
functional/bclone/bclone_samefs_data.ksh \
functional/bclone/bclone_samefs_embedded.ksh \
functional/bclone/bclone_samefs_hole.ksh \
functional/bclone/cleanup.ksh \
functional/bclone/setup.ksh \
functional/block_cloning/cleanup.ksh \
functional/block_cloning/setup.ksh \
functional/block_cloning/block_cloning_clone_mmap_cached.ksh \
functional/block_cloning/block_cloning_clone_mmap_write.ksh \
functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh \
functional/block_cloning/block_cloning_copyfilerange_fallback.ksh \
functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh \
functional/block_cloning/block_cloning_copyfilerange.ksh \
functional/block_cloning/block_cloning_copyfilerange_partial.ksh \
functional/block_cloning/block_cloning_disabled_copyfilerange.ksh \
functional/block_cloning/block_cloning_disabled_ficlone.ksh \
functional/block_cloning/block_cloning_disabled_ficlonerange.ksh \
functional/block_cloning/block_cloning_ficlone.ksh \
functional/block_cloning/block_cloning_ficlonerange.ksh \
functional/block_cloning/block_cloning_ficlonerange_partial.ksh \
functional/block_cloning/block_cloning_cross_enc_dataset.ksh \
functional/block_cloning/block_cloning_replay.ksh \
functional/block_cloning/block_cloning_replay_encrypted.ksh \
functional/block_cloning/block_cloning_lwb_buffer_overflow.ksh \
functional/block_cloning/block_cloning_rlimit_fsize.ksh \
functional/block_cloning/block_cloning_large_offset.ksh \
functional/bootfs/bootfs_001_pos.ksh \
functional/bootfs/bootfs_002_neg.ksh \
functional/bootfs/bootfs_003_pos.ksh \
functional/bootfs/bootfs_004_neg.ksh \
functional/bootfs/bootfs_005_neg.ksh \
functional/bootfs/bootfs_006_pos.ksh \
functional/bootfs/bootfs_007_pos.ksh \
functional/bootfs/bootfs_008_pos.ksh \
functional/bootfs/cleanup.ksh \
functional/bootfs/setup.ksh \
functional/btree/btree_negative.ksh \
functional/btree/btree_positive.ksh \
functional/cache/cache_001_pos.ksh \
functional/cache/cache_002_pos.ksh \
functional/cache/cache_003_pos.ksh \
functional/cache/cache_004_neg.ksh \
functional/cache/cache_005_neg.ksh \
functional/cache/cache_006_pos.ksh \
functional/cache/cache_007_neg.ksh \
functional/cache/cache_008_neg.ksh \
functional/cache/cache_009_pos.ksh \
functional/cache/cache_010_pos.ksh \
functional/cache/cache_011_pos.ksh \
functional/cache/cache_012_pos.ksh \
functional/cache/cleanup.ksh \
functional/cachefile/cachefile_001_pos.ksh \
functional/cachefile/cachefile_002_pos.ksh \
functional/cachefile/cachefile_003_pos.ksh \
functional/cachefile/cachefile_004_pos.ksh \
functional/cachefile/cleanup.ksh \
functional/cachefile/setup.ksh \
functional/cache/setup.ksh \
functional/casenorm/case_all_values.ksh \
functional/casenorm/cleanup.ksh \
functional/casenorm/insensitive_formd_delete.ksh \
functional/casenorm/insensitive_formd_lookup.ksh \
functional/casenorm/insensitive_none_delete.ksh \
functional/casenorm/insensitive_none_lookup.ksh \
functional/casenorm/mixed_create_failure.ksh \
functional/casenorm/mixed_formd_delete.ksh \
functional/casenorm/mixed_formd_lookup_ci.ksh \
functional/casenorm/mixed_formd_lookup.ksh \
functional/casenorm/mixed_none_delete.ksh \
functional/casenorm/mixed_none_lookup_ci.ksh \
functional/casenorm/mixed_none_lookup.ksh \
functional/casenorm/norm_all_values.ksh \
functional/casenorm/sensitive_formd_delete.ksh \
functional/casenorm/sensitive_formd_lookup.ksh \
functional/casenorm/sensitive_none_delete.ksh \
functional/casenorm/sensitive_none_lookup.ksh \
functional/casenorm/setup.ksh \
functional/channel_program/lua_core/cleanup.ksh \
functional/channel_program/lua_core/setup.ksh \
functional/channel_program/lua_core/tst.args_to_lua.ksh \
functional/channel_program/lua_core/tst.divide_by_zero.ksh \
functional/channel_program/lua_core/tst.exists.ksh \
functional/channel_program/lua_core/tst.integer_illegal.ksh \
functional/channel_program/lua_core/tst.integer_overflow.ksh \
functional/channel_program/lua_core/tst.language_functions_neg.ksh \
functional/channel_program/lua_core/tst.language_functions_pos.ksh \
functional/channel_program/lua_core/tst.large_prog.ksh \
functional/channel_program/lua_core/tst.libraries.ksh \
functional/channel_program/lua_core/tst.memory_limit.ksh \
functional/channel_program/lua_core/tst.nested_neg.ksh \
functional/channel_program/lua_core/tst.nested_pos.ksh \
functional/channel_program/lua_core/tst.nvlist_to_lua.ksh \
functional/channel_program/lua_core/tst.recursive_neg.ksh \
functional/channel_program/lua_core/tst.recursive_pos.ksh \
functional/channel_program/lua_core/tst.return_large.ksh \
functional/channel_program/lua_core/tst.return_nvlist_neg.ksh \
functional/channel_program/lua_core/tst.return_nvlist_pos.ksh \
functional/channel_program/lua_core/tst.return_recursive_table.ksh \
functional/channel_program/lua_core/tst.stack_gsub.ksh \
functional/channel_program/lua_core/tst.timeout.ksh \
functional/channel_program/synctask_core/cleanup.ksh \
functional/channel_program/synctask_core/setup.ksh \
functional/channel_program/synctask_core/tst.bookmark.copy.ksh \
functional/channel_program/synctask_core/tst.bookmark.create.ksh \
functional/channel_program/synctask_core/tst.destroy_fs.ksh \
functional/channel_program/synctask_core/tst.destroy_snap.ksh \
functional/channel_program/synctask_core/tst.get_count_and_limit.ksh \
functional/channel_program/synctask_core/tst.get_index_props.ksh \
functional/channel_program/synctask_core/tst.get_mountpoint.ksh \
functional/channel_program/synctask_core/tst.get_neg.ksh \
functional/channel_program/synctask_core/tst.get_number_props.ksh \
functional/channel_program/synctask_core/tst.get_string_props.ksh \
functional/channel_program/synctask_core/tst.get_type.ksh \
functional/channel_program/synctask_core/tst.get_userquota.ksh \
functional/channel_program/synctask_core/tst.get_written.ksh \
functional/channel_program/synctask_core/tst.inherit.ksh \
functional/channel_program/synctask_core/tst.list_bookmarks.ksh \
functional/channel_program/synctask_core/tst.list_children.ksh \
functional/channel_program/synctask_core/tst.list_clones.ksh \
functional/channel_program/synctask_core/tst.list_holds.ksh \
functional/channel_program/synctask_core/tst.list_snapshots.ksh \
functional/channel_program/synctask_core/tst.list_system_props.ksh \
functional/channel_program/synctask_core/tst.list_user_props.ksh \
functional/channel_program/synctask_core/tst.parse_args_neg.ksh \
functional/channel_program/synctask_core/tst.promote_conflict.ksh \
functional/channel_program/synctask_core/tst.promote_multiple.ksh \
functional/channel_program/synctask_core/tst.promote_simple.ksh \
functional/channel_program/synctask_core/tst.rollback_mult.ksh \
functional/channel_program/synctask_core/tst.rollback_one.ksh \
functional/channel_program/synctask_core/tst.set_props.ksh \
functional/channel_program/synctask_core/tst.snapshot_destroy.ksh \
functional/channel_program/synctask_core/tst.snapshot_neg.ksh \
functional/channel_program/synctask_core/tst.snapshot_recursive.ksh \
functional/channel_program/synctask_core/tst.snapshot_rename.ksh \
functional/channel_program/synctask_core/tst.snapshot_simple.ksh \
functional/channel_program/synctask_core/tst.terminate_by_signal.ksh \
functional/chattr/chattr_001_pos.ksh \
functional/chattr/chattr_002_neg.ksh \
functional/chattr/cleanup.ksh \
functional/chattr/setup.ksh \
functional/checksum/cleanup.ksh \
functional/checksum/filetest_001_pos.ksh \
functional/checksum/filetest_002_pos.ksh \
functional/checksum/run_blake3_test.ksh \
functional/checksum/run_edonr_test.ksh \
functional/checksum/run_sha2_test.ksh \
functional/checksum/run_skein_test.ksh \
functional/checksum/setup.ksh \
functional/clean_mirror/clean_mirror_001_pos.ksh \
functional/clean_mirror/clean_mirror_002_pos.ksh \
functional/clean_mirror/clean_mirror_003_pos.ksh \
functional/clean_mirror/clean_mirror_004_pos.ksh \
functional/clean_mirror/cleanup.ksh \
functional/clean_mirror/setup.ksh \
functional/cli_root/json/cleanup.ksh \
functional/cli_root/json/setup.ksh \
functional/cli_root/json/json_sanity.ksh \
functional/cli_root/zinject/zinject_args.ksh \
functional/cli_root/zinject/zinject_counts.ksh \
functional/cli_root/zinject/zinject_probe.ksh \
functional/cli_root/zdb/zdb_002_pos.ksh \
functional/cli_root/zdb/zdb_003_pos.ksh \
functional/cli_root/zdb/zdb_004_pos.ksh \
functional/cli_root/zdb/zdb_005_pos.ksh \
functional/cli_root/zdb/zdb_006_pos.ksh \
functional/cli_root/zdb/zdb_args_neg.ksh \
functional/cli_root/zdb/zdb_args_pos.ksh \
functional/cli_root/zdb/zdb_backup.ksh \
functional/cli_root/zdb/zdb_block_size_histogram.ksh \
functional/cli_root/zdb/zdb_checksum.ksh \
functional/cli_root/zdb/zdb_decompress.ksh \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_encrypted.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
functional/cli_root/zdb/zdb_objset_id.ksh \
functional/cli_root/zdb/zdb_recover_2.ksh \
functional/cli_root/zdb/zdb_recover.ksh \
functional/cli_root/zfs_bookmark/cleanup.ksh \
functional/cli_root/zfs_bookmark/setup.ksh \
functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh \
functional/cli_root/zfs_change-key/cleanup.ksh \
functional/cli_root/zfs_change-key/setup.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_child.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_clones.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_format.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_inherit.ksh \
functional/cli_root/zfs_change-key/zfs_change-key.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_load.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_location.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_pbkdf2iters.ksh \
functional/cli_root/zfs/cleanup.ksh \
functional/cli_root/zfs_clone/cleanup.ksh \
functional/cli_root/zfs_clone/setup.ksh \
functional/cli_root/zfs_clone/zfs_clone_001_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_002_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_003_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_004_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_006_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_007_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_008_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_009_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_deeply_nested.ksh \
functional/cli_root/zfs_clone/zfs_clone_encrypted.ksh \
functional/cli_root/zfs_clone/zfs_clone_rm_nested.ksh \
functional/cli_root/zfs_copies/cleanup.ksh \
functional/cli_root/zfs_copies/setup.ksh \
functional/cli_root/zfs_copies/zfs_copies_001_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_003_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_004_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_006_pos.ksh \
functional/cli_root/zfs_create/cleanup.ksh \
functional/cli_root/zfs_create/setup.ksh \
functional/cli_root/zfs_create/zfs_create_001_pos.ksh \
functional/cli_root/zfs_create/zfs_create_002_pos.ksh \
functional/cli_root/zfs_create/zfs_create_003_pos.ksh \
functional/cli_root/zfs_create/zfs_create_004_pos.ksh \
functional/cli_root/zfs_create/zfs_create_005_pos.ksh \
functional/cli_root/zfs_create/zfs_create_006_pos.ksh \
functional/cli_root/zfs_create/zfs_create_007_pos.ksh \
functional/cli_root/zfs_create/zfs_create_008_neg.ksh \
functional/cli_root/zfs_create/zfs_create_009_neg.ksh \
functional/cli_root/zfs_create/zfs_create_010_neg.ksh \
functional/cli_root/zfs_create/zfs_create_011_pos.ksh \
functional/cli_root/zfs_create/zfs_create_012_pos.ksh \
functional/cli_root/zfs_create/zfs_create_013_pos.ksh \
functional/cli_root/zfs_create/zfs_create_014_pos.ksh \
functional/cli_root/zfs_create/zfs_create_crypt_combos.ksh \
functional/cli_root/zfs_create/zfs_create_dryrun.ksh \
functional/cli_root/zfs_create/zfs_create_encrypted.ksh \
functional/cli_root/zfs_create/zfs_create_nomount.ksh \
functional/cli_root/zfs_create/zfs_create_verbose.ksh \
functional/cli_root/zfs_destroy/cleanup.ksh \
functional/cli_root/zfs_destroy/setup.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_dedup.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_002_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_003_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_006_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_007_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_008_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_009_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_010_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_011_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_012_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_013_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal.ksh \
functional/cli_root/zfs_diff/cleanup.ksh \
functional/cli_root/zfs_diff/setup.ksh \
functional/cli_root/zfs_diff/zfs_diff_changes.ksh \
functional/cli_root/zfs_diff/zfs_diff_cliargs.ksh \
functional/cli_root/zfs_diff/zfs_diff_encrypted.ksh \
functional/cli_root/zfs_diff/zfs_diff_mangle.ksh \
functional/cli_root/zfs_diff/zfs_diff_timestamp.ksh \
functional/cli_root/zfs_diff/zfs_diff_types.ksh \
functional/cli_root/zfs_get/cleanup.ksh \
functional/cli_root/zfs_get/setup.ksh \
functional/cli_root/zfs_get/zfs_get_001_pos.ksh \
functional/cli_root/zfs_get/zfs_get_002_pos.ksh \
functional/cli_root/zfs_get/zfs_get_003_pos.ksh \
functional/cli_root/zfs_get/zfs_get_004_pos.ksh \
functional/cli_root/zfs_get/zfs_get_005_neg.ksh \
functional/cli_root/zfs_get/zfs_get_006_neg.ksh \
functional/cli_root/zfs_get/zfs_get_007_neg.ksh \
functional/cli_root/zfs_get/zfs_get_008_pos.ksh \
functional/cli_root/zfs_get/zfs_get_009_pos.ksh \
functional/cli_root/zfs_get/zfs_get_010_neg.ksh \
functional/cli_root/zfs_ids_to_path/cleanup.ksh \
functional/cli_root/zfs_ids_to_path/setup.ksh \
functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh \
functional/cli_root/zfs_inherit/cleanup.ksh \
functional/cli_root/zfs_inherit/setup.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_001_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_mountpoint.ksh \
functional/cli_root/zfs_jail/cleanup.ksh \
functional/cli_root/zfs_jail/setup.ksh \
functional/cli_root/zfs_jail/zfs_jail_001_pos.ksh \
functional/cli_root/zfs_load-key/cleanup.ksh \
functional/cli_root/zfs_load-key/setup.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_all.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_file.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_https.ksh \
functional/cli_root/zfs_load-key/zfs_load-key.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_location.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_noop.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh \
functional/cli_root/zfs_mount/cleanup.ksh \
functional/cli_root/zfs_mount/setup.ksh \
functional/cli_root/zfs_mount/zfs_mount_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_002_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_003_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_004_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_005_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_006_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_009_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_011_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_012_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_013_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_014_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_mountpoints.ksh \
functional/cli_root/zfs_mount/zfs_mount_encrypted.ksh \
functional/cli_root/zfs_mount/zfs_mount_recursive.ksh \
functional/cli_root/zfs_mount/zfs_mount_remount.ksh \
functional/cli_root/zfs_mount/zfs_mount_test_race.ksh \
functional/cli_root/zfs_mount/zfs_multi_mount.ksh \
functional/cli_root/zfs_program/cleanup.ksh \
functional/cli_root/zfs_program/setup.ksh \
functional/cli_root/zfs_program/zfs_program_json.ksh \
functional/cli_root/zfs_promote/cleanup.ksh \
functional/cli_root/zfs_promote/setup.ksh \
functional/cli_root/zfs_promote/zfs_promote_001_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_002_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_003_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_004_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_005_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_007_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_008_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_encryptionroot.ksh \
functional/cli_root/zfs_property/cleanup.ksh \
functional/cli_root/zfs_property/setup.ksh \
functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh \
functional/cli_root/zfs_receive/cleanup.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_aliases.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_override.ksh \
functional/cli_root/zfs_receive/setup.ksh \
functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_002_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_003_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_005_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_006_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_007_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_008_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_009_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_012_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_013_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_014_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_015_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_016_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_-e.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_zstd.ksh \
functional/cli_root/zfs_receive/zfs_receive_new_props.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_-d.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_incremental.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw.ksh \
functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh \
functional/cli_root/zfs_receive/zfs_receive_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_compressed_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_large_block_corrective.ksh \
functional/cli_root/zfs_rename/cleanup.ksh \
functional/cli_root/zfs_rename/setup.ksh \
functional/cli_root/zfs_rename/zfs_rename_001_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_002_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_003_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_008_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_010_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_011_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_012_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_013_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_encrypted_child.ksh \
functional/cli_root/zfs_rename/zfs_rename_mountpoint.ksh \
functional/cli_root/zfs_rename/zfs_rename_nounmount.ksh \
functional/cli_root/zfs_rename/zfs_rename_to_encrypted.ksh \
functional/cli_root/zfs_reservation/cleanup.ksh \
functional/cli_root/zfs_reservation/setup.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_001_pos.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_002_pos.ksh \
functional/cli_root/zfs_rollback/cleanup.ksh \
functional/cli_root/zfs_rollback/setup.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_002_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_003_neg.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_004_neg.ksh \
functional/cli_root/zfs_send/cleanup.ksh \
functional/cli_root/zfs_send/setup.ksh \
functional/cli_root/zfs_send/zfs_send_001_pos.ksh \
functional/cli_root/zfs_send/zfs_send_002_pos.ksh \
functional/cli_root/zfs_send/zfs_send_003_pos.ksh \
functional/cli_root/zfs_send/zfs_send_004_neg.ksh \
functional/cli_root/zfs_send/zfs_send_005_pos.ksh \
functional/cli_root/zfs_send/zfs_send_006_pos.ksh \
functional/cli_root/zfs_send/zfs_send_007_pos.ksh \
functional/cli_root/zfs_send/zfs_send-b.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted_unloaded.ksh \
functional/cli_root/zfs_send/zfs_send_raw.ksh \
functional/cli_root/zfs_send/zfs_send_skip_missing.ksh \
functional/cli_root/zfs_send/zfs_send_sparse.ksh \
functional/cli_root/zfs_set/cache_001_pos.ksh \
functional/cli_root/zfs_set/cache_002_neg.ksh \
functional/cli_root/zfs_set/canmount_001_pos.ksh \
functional/cli_root/zfs_set/canmount_002_pos.ksh \
functional/cli_root/zfs_set/canmount_003_pos.ksh \
functional/cli_root/zfs_set/canmount_004_pos.ksh \
functional/cli_root/zfs_set/checksum_001_pos.ksh \
functional/cli_root/zfs_set/cleanup.ksh \
functional/cli_root/zfs_set/compression_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_002_pos.ksh \
functional/cli_root/zfs_set/mountpoint_003_pos.ksh \
functional/cli_root/zfs_set/onoffs_001_pos.ksh \
functional/cli_root/zfs_set/property_alias_001_pos.ksh \
functional/cli_root/zfs_set/readonly_001_pos.ksh \
functional/cli_root/zfs_set/reservation_001_neg.ksh \
functional/cli_root/zfs_set/ro_props_001_pos.ksh \
functional/cli_root/zfs_set/setup.ksh \
functional/cli_root/zfs_set/share_mount_001_neg.ksh \
functional/cli_root/zfs_set/snapdir_001_pos.ksh \
functional/cli_root/zfs/setup.ksh \
functional/cli_root/zfs_set/user_property_001_pos.ksh \
functional/cli_root/zfs_set/user_property_002_pos.ksh \
functional/cli_root/zfs_set/user_property_003_neg.ksh \
functional/cli_root/zfs_set/user_property_004_pos.ksh \
functional/cli_root/zfs_set/version_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_002_neg.ksh \
functional/cli_root/zfs_set/zfs_set_003_neg.ksh \
functional/cli_root/zfs_set/zfs_set_feature_activation.ksh \
functional/cli_root/zfs_set/zfs_set_keylocation.ksh \
functional/cli_root/zfs_set/zfs_set_nomount.ksh \
functional/cli_root/zfs_share/cleanup.ksh \
functional/cli_root/zfs_share/setup.ksh \
functional/cli_root/zfs_share/zfs_share_001_pos.ksh \
functional/cli_root/zfs_share/zfs_share_002_pos.ksh \
functional/cli_root/zfs_share/zfs_share_003_pos.ksh \
functional/cli_root/zfs_share/zfs_share_004_pos.ksh \
functional/cli_root/zfs_share/zfs_share_005_pos.ksh \
functional/cli_root/zfs_share/zfs_share_006_pos.ksh \
functional/cli_root/zfs_share/zfs_share_007_neg.ksh \
functional/cli_root/zfs_share/zfs_share_008_neg.ksh \
functional/cli_root/zfs_share/zfs_share_009_neg.ksh \
functional/cli_root/zfs_share/zfs_share_010_neg.ksh \
functional/cli_root/zfs_share/zfs_share_011_pos.ksh \
functional/cli_root/zfs_share/zfs_share_012_pos.ksh \
functional/cli_root/zfs_share/zfs_share_013_pos.ksh \
functional/cli_root/zfs_share/zfs_share_concurrent_shares.ksh \
functional/cli_root/zfs_share/zfs_share_after_mount.ksh \
functional/cli_root/zfs_snapshot/cleanup.ksh \
functional/cli_root/zfs_snapshot/setup.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_001_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_003_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_004_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_005_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_006_pos.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_007_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh \
functional/cli_root/zfs_sysfs/cleanup.ksh \
functional/cli_root/zfs_sysfs/setup.ksh \
functional/cli_root/zfs_sysfs/zfeature_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_sysfs_live.ksh \
functional/cli_root/zfs_sysfs/zpool_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zpool_set_unsupported.ksh \
functional/cli_root/zfs_unload-key/cleanup.ksh \
functional/cli_root/zfs_unload-key/setup.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_all.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_recursive.ksh \
functional/cli_root/zfs_unmount/cleanup.ksh \
functional/cli_root/zfs_unmount/setup.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_002_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_003_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_004_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_006_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_007_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_008_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_nested.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_unload_keys.ksh \
functional/cli_root/zfs_unshare/cleanup.ksh \
functional/cli_root/zfs_unshare/setup.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_003_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_005_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_006_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_007_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_008_pos.ksh \
functional/cli_root/zfs_upgrade/cleanup.ksh \
functional/cli_root/zfs_upgrade/setup.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_002_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_003_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_004_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_005_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_006_neg.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_007_neg.ksh \
functional/cli_root/zfs_wait/cleanup.ksh \
functional/cli_root/zfs_wait/setup.ksh \
functional/cli_root/zfs_wait/zfs_wait_deleteq.ksh \
functional/cli_root/zfs_wait/zfs_wait_getsubopt.ksh \
functional/cli_root/zfs/zfs_001_neg.ksh \
functional/cli_root/zfs/zfs_002_pos.ksh \
functional/cli_root/zfs/zfs_003_neg.ksh \
functional/cli_root/zhack/zhack_label_repair_001.ksh \
functional/cli_root/zhack/zhack_label_repair_002.ksh \
functional/cli_root/zhack/zhack_label_repair_003.ksh \
functional/cli_root/zhack/zhack_label_repair_004.ksh \
functional/cli_root/zpool_add/add_nested_replacing_spare.ksh \
functional/cli_root/zpool_add/add-o_ashift.ksh \
functional/cli_root/zpool_add/add_prop_ashift.ksh \
functional/cli_root/zpool_add/cleanup.ksh \
functional/cli_root/zpool_add/setup.ksh \
functional/cli_root/zpool_add/zpool_add_001_pos.ksh \
functional/cli_root/zpool_add/zpool_add_002_pos.ksh \
functional/cli_root/zpool_add/zpool_add_003_pos.ksh \
functional/cli_root/zpool_add/zpool_add_004_pos.ksh \
functional/cli_root/zpool_add/zpool_add_005_pos.ksh \
functional/cli_root/zpool_add/zpool_add_006_pos.ksh \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
functional/cli_root/zpool_attach/setup.ksh \
functional/cli_root/zpool_attach/zpool_attach_001_neg.ksh \
functional/cli_root/zpool/cleanup.ksh \
functional/cli_root/zpool_clear/cleanup.ksh \
functional/cli_root/zpool_clear/setup.ksh \
functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh \
functional/cli_root/zpool_clear/zpool_clear_002_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_003_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_readonly.ksh \
functional/cli_root/zpool_create/cleanup.ksh \
functional/cli_root/zpool_create/create-o_ashift.ksh \
functional/cli_root/zpool_create/setup.ksh \
functional/cli_root/zpool_create/zpool_create_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_007_neg.ksh \
functional/cli_root/zpool_create/zpool_create_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_009_neg.ksh \
functional/cli_root/zpool_create/zpool_create_010_neg.ksh \
functional/cli_root/zpool_create/zpool_create_011_neg.ksh \
functional/cli_root/zpool_create/zpool_create_012_neg.ksh \
functional/cli_root/zpool_create/zpool_create_014_neg.ksh \
functional/cli_root/zpool_create/zpool_create_015_neg.ksh \
functional/cli_root/zpool_create/zpool_create_016_pos.ksh \
functional/cli_root/zpool_create/zpool_create_017_neg.ksh \
functional/cli_root/zpool_create/zpool_create_018_pos.ksh \
functional/cli_root/zpool_create/zpool_create_019_pos.ksh \
functional/cli_root/zpool_create/zpool_create_020_pos.ksh \
functional/cli_root/zpool_create/zpool_create_021_pos.ksh \
functional/cli_root/zpool_create/zpool_create_022_pos.ksh \
functional/cli_root/zpool_create/zpool_create_023_neg.ksh \
functional/cli_root/zpool_create/zpool_create_024_pos.ksh \
functional/cli_root/zpool_create/zpool_create_crypt_combos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_dryrun_output.ksh \
functional/cli_root/zpool_create/zpool_create_encrypted.ksh \
functional/cli_root/zpool_create/zpool_create_features_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_004_neg.ksh \
functional/cli_root/zpool_create/zpool_create_features_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_007_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh \
functional/cli_root/zpool_create/zpool_create_tempname.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_002_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_003_neg.ksh \
functional/cli_root/zpool_detach/cleanup.ksh \
functional/cli_root/zpool_detach/setup.ksh \
functional/cli_root/zpool_detach/zpool_detach_001_neg.ksh \
functional/cli_root/zpool_events/cleanup.ksh \
functional/cli_root/zpool_events/setup.ksh \
functional/cli_root/zpool_events/zpool_events_clear.ksh \
functional/cli_root/zpool_events/zpool_events_clear_retained.ksh \
functional/cli_root/zpool_events/zpool_events_cliargs.ksh \
functional/cli_root/zpool_events/zpool_events_duplicates.ksh \
functional/cli_root/zpool_events/zpool_events_errors.ksh \
functional/cli_root/zpool_events/zpool_events_follow.ksh \
functional/cli_root/zpool_events/zpool_events_poolname.ksh \
functional/cli_root/zpool_expand/cleanup.ksh \
functional/cli_root/zpool_expand/setup.ksh \
functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh \
functional/cli_root/zpool_expand/zpool_expand_004_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_005_pos.ksh \
functional/cli_root/zpool_export/cleanup.ksh \
functional/cli_root/zpool_export/setup.ksh \
functional/cli_root/zpool_export/zpool_export_001_pos.ksh \
functional/cli_root/zpool_export/zpool_export_002_pos.ksh \
functional/cli_root/zpool_export/zpool_export_003_neg.ksh \
functional/cli_root/zpool_export/zpool_export_004_pos.ksh \
functional/cli_root/zpool_export/zpool_export_parallel_admin.ksh \
functional/cli_root/zpool_export/zpool_export_parallel_pos.ksh \
functional/cli_root/zpool_get/cleanup.ksh \
functional/cli_root/zpool_get/setup.ksh \
functional/cli_root/zpool_get/vdev_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_002_pos.ksh \
functional/cli_root/zpool_get/zpool_get_003_pos.ksh \
functional/cli_root/zpool_get/zpool_get_004_neg.ksh \
functional/cli_root/zpool_get/zpool_get_005_pos.ksh \
functional/cli_root/zpool_history/cleanup.ksh \
functional/cli_root/zpool_history/setup.ksh \
functional/cli_root/zpool_history/zpool_history_001_neg.ksh \
functional/cli_root/zpool_history/zpool_history_002_pos.ksh \
functional/cli_root/zpool_import/cleanup.ksh \
functional/cli_root/zpool_import/import_cachefile_device_added.ksh \
functional/cli_root/zpool_import/import_cachefile_device_removed.ksh \
functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_attached.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_detached.ksh \
functional/cli_root/zpool_import/import_cachefile_paths_changed.ksh \
functional/cli_root/zpool_import/import_cachefile_shared_device.ksh \
functional/cli_root/zpool_import/import_devices_missing.ksh \
functional/cli_root/zpool_import/import_log_missing.ksh \
functional/cli_root/zpool_import/import_paths_changed.ksh \
functional/cli_root/zpool_import/import_rewind_config_changed.ksh \
functional/cli_root/zpool_import/import_rewind_device_replaced.ksh \
functional/cli_root/zpool_import/setup.ksh \
functional/cli_root/zpool_import/zpool_import_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_004_pos.ksh \
functional/cli_root/zpool_import/zpool_import_005_pos.ksh \
functional/cli_root/zpool_import/zpool_import_006_pos.ksh \
functional/cli_root/zpool_import/zpool_import_007_pos.ksh \
functional/cli_root/zpool_import/zpool_import_008_pos.ksh \
functional/cli_root/zpool_import/zpool_import_009_neg.ksh \
functional/cli_root/zpool_import/zpool_import_010_pos.ksh \
functional/cli_root/zpool_import/zpool_import_011_neg.ksh \
functional/cli_root/zpool_import/zpool_import_012_pos.ksh \
functional/cli_root/zpool_import/zpool_import_013_neg.ksh \
functional/cli_root/zpool_import/zpool_import_014_pos.ksh \
functional/cli_root/zpool_import/zpool_import_015_pos.ksh \
functional/cli_root/zpool_import/zpool_import_016_pos.ksh \
functional/cli_root/zpool_import/zpool_import_017_pos.ksh \
functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted_load.ksh \
functional/cli_root/zpool_import/zpool_import_errata3.ksh \
functional/cli_root/zpool_import/zpool_import_errata4.ksh \
functional/cli_root/zpool_import/zpool_import_features_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_features_002_neg.ksh \
functional/cli_root/zpool_import/zpool_import_features_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed_unclean_export.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed_cachefile.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed_cachefile_unclean_export.ksh \
functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_status.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_admin.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_neg.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_pos.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_import_export.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_offline_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_online_offline.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_split.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_neg.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_pos.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_suspend_resume.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_uninit.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_unsupported_vdevs.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_checksums.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_removed.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_valid.ksh \
functional/cli_root/zpool_offline/cleanup.ksh \
functional/cli_root/zpool_offline/setup.ksh \
functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh \
functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh \
functional/cli_root/zpool_offline/zpool_offline_003_pos.ksh \
functional/cli_root/zpool_online/cleanup.ksh \
functional/cli_root/zpool_online/setup.ksh \
functional/cli_root/zpool_online/zpool_online_001_pos.ksh \
functional/cli_root/zpool_online/zpool_online_002_neg.ksh \
functional/cli_root/zpool_prefetch/cleanup.ksh \
functional/cli_root/zpool_prefetch/setup.ksh \
functional/cli_root/zpool_prefetch/zpool_prefetch_001_pos.ksh \
functional/cli_root/zpool_reguid/cleanup.ksh \
functional/cli_root/zpool_reguid/setup.ksh \
functional/cli_root/zpool_reguid/zpool_reguid_001_pos.ksh \
functional/cli_root/zpool_reguid/zpool_reguid_002_neg.ksh \
functional/cli_root/zpool_remove/cleanup.ksh \
functional/cli_root/zpool_remove/setup.ksh \
functional/cli_root/zpool_remove/zpool_remove_001_neg.ksh \
functional/cli_root/zpool_remove/zpool_remove_002_pos.ksh \
functional/cli_root/zpool_remove/zpool_remove_003_pos.ksh \
functional/cli_root/zpool_reopen/cleanup.ksh \
functional/cli_root/zpool_reopen/setup.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh \
functional/cli_root/zpool_replace/cleanup.ksh \
functional/cli_root/zpool_replace/replace-o_ashift.ksh \
functional/cli_root/zpool_replace/replace_prop_ashift.ksh \
functional/cli_root/zpool_replace/setup.ksh \
functional/cli_root/zpool_replace/zpool_replace_001_neg.ksh \
functional/cli_root/zpool_resilver/cleanup.ksh \
functional/cli_root/zpool_resilver/setup.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh \
functional/cli_root/zpool_scrub/cleanup.ksh \
functional/cli_root/zpool_scrub/setup.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_encrypted_unloaded.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_txg_continue_from_last.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_001_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_004_pos.ksh \
functional/cli_root/zpool_set/cleanup.ksh \
functional/cli_root/zpool_set/setup.ksh \
functional/cli_root/zpool/setup.ksh \
functional/cli_root/zpool_set/vdev_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_common.kshlib \
functional/cli_root/zpool_set/zpool_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_003_neg.ksh \
functional/cli_root/zpool_set/zpool_set_ashift.ksh \
functional/cli_root/zpool_set/user_property_001_pos.ksh \
functional/cli_root/zpool_set/user_property_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_features.ksh \
functional/cli_root/zpool_set/zpool_set_clear_userprop.ksh \
functional/cli_root/zpool_split/cleanup.ksh \
functional/cli_root/zpool_split/setup.ksh \
functional/cli_root/zpool_split/zpool_split_cliargs.ksh \
functional/cli_root/zpool_split/zpool_split_devices.ksh \
functional/cli_root/zpool_split/zpool_split_dryrun_output.ksh \
functional/cli_root/zpool_split/zpool_split_encryption.ksh \
functional/cli_root/zpool_split/zpool_split_indirect.ksh \
functional/cli_root/zpool_split/zpool_split_props.ksh \
functional/cli_root/zpool_split/zpool_split_resilver.ksh \
functional/cli_root/zpool_split/zpool_split_vdevs.ksh \
functional/cli_root/zpool_split/zpool_split_wholedisk.ksh \
functional/cli_root/zpool_status/cleanup.ksh \
functional/cli_root/zpool_status/setup.ksh \
functional/cli_root/zpool_status/zpool_status_001_pos.ksh \
functional/cli_root/zpool_status/zpool_status_002_pos.ksh \
functional/cli_root/zpool_status/zpool_status_003_pos.ksh \
functional/cli_root/zpool_status/zpool_status_004_pos.ksh \
functional/cli_root/zpool_status/zpool_status_005_pos.ksh \
functional/cli_root/zpool_status/zpool_status_006_pos.ksh \
functional/cli_root/zpool_status/zpool_status_007_pos.ksh \
functional/cli_root/zpool_status/zpool_status_008_pos.ksh \
functional/cli_root/zpool_status/zpool_status_features_001_pos.ksh \
functional/cli_root/zpool_sync/cleanup.ksh \
functional/cli_root/zpool_sync/setup.ksh \
functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh \
functional/cli_root/zpool_sync/zpool_sync_002_neg.ksh \
functional/cli_root/zpool_trim/cleanup.ksh \
functional/cli_root/zpool_trim/setup.ksh \
functional/cli_root/zpool_trim/zpool_trim_attach_detach_add_remove.ksh \
functional/cli_root/zpool_trim/zpool_trim_fault_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_import_export.ksh \
functional/cli_root/zpool_trim/zpool_trim_multiple.ksh \
functional/cli_root/zpool_trim/zpool_trim_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_offline_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh \
functional/cli_root/zpool_trim/zpool_trim_partial.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_secure.ksh \
functional/cli_root/zpool_trim/zpool_trim_split.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_pos.ksh \
functional/cli_root/zpool_trim/zpool_trim_suspend_resume.ksh \
functional/cli_root/zpool_trim/zpool_trim_unsupported_vdevs.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_checksums.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh \
functional/cli_root/zpool_upgrade/cleanup.ksh \
functional/cli_root/zpool_upgrade/setup.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_001_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_002_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_003_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_005_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_006_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_008_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_009_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_features_001_pos.ksh \
functional/cli_root/zpool_wait/cleanup.ksh \
functional/cli_root/zpool_wait/scan/cleanup.ksh \
functional/cli_root/zpool_wait/scan/setup.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_rebuild.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_resilver.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_basic.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh \
functional/cli_root/zpool_wait/setup.ksh \
functional/cli_root/zpool_wait/zpool_wait_discard.ksh \
functional/cli_root/zpool_wait/zpool_wait_freeing.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_multiple.ksh \
functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_usage.ksh \
functional/cli_root/zpool/zpool_001_neg.ksh \
functional/cli_root/zpool/zpool_002_pos.ksh \
functional/cli_root/zpool/zpool_003_pos.ksh \
functional/cli_root/zpool/zpool_colors.ksh \
functional/cli_user/misc/arcstat_001_pos.ksh \
functional/cli_user/misc/arc_summary_001_pos.ksh \
functional/cli_user/misc/arc_summary_002_neg.ksh \
functional/cli_user/misc/zilstat_001_pos.ksh \
functional/cli_user/misc/cleanup.ksh \
functional/cli_user/misc/setup.ksh \
functional/cli_user/misc/zdb_001_neg.ksh \
functional/cli_user/misc/zfs_001_neg.ksh \
functional/cli_user/misc/zfs_allow_001_neg.ksh \
functional/cli_user/misc/zfs_clone_001_neg.ksh \
functional/cli_user/misc/zfs_create_001_neg.ksh \
functional/cli_user/misc/zfs_destroy_001_neg.ksh \
functional/cli_user/misc/zfs_get_001_neg.ksh \
functional/cli_user/misc/zfs_inherit_001_neg.ksh \
functional/cli_user/misc/zfs_mount_001_neg.ksh \
functional/cli_user/misc/zfs_promote_001_neg.ksh \
functional/cli_user/misc/zfs_receive_001_neg.ksh \
functional/cli_user/misc/zfs_rename_001_neg.ksh \
functional/cli_user/misc/zfs_rollback_001_neg.ksh \
functional/cli_user/misc/zfs_send_001_neg.ksh \
functional/cli_user/misc/zfs_set_001_neg.ksh \
functional/cli_user/misc/zfs_share_001_neg.ksh \
functional/cli_user/misc/zfs_snapshot_001_neg.ksh \
functional/cli_user/misc/zfs_unallow_001_neg.ksh \
functional/cli_user/misc/zfs_unmount_001_neg.ksh \
functional/cli_user/misc/zfs_unshare_001_neg.ksh \
functional/cli_user/misc/zfs_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_001_neg.ksh \
functional/cli_user/misc/zpool_add_001_neg.ksh \
functional/cli_user/misc/zpool_attach_001_neg.ksh \
functional/cli_user/misc/zpool_clear_001_neg.ksh \
functional/cli_user/misc/zpool_create_001_neg.ksh \
functional/cli_user/misc/zpool_destroy_001_neg.ksh \
functional/cli_user/misc/zpool_detach_001_neg.ksh \
functional/cli_user/misc/zpool_export_001_neg.ksh \
functional/cli_user/misc/zpool_get_001_neg.ksh \
functional/cli_user/misc/zpool_history_001_neg.ksh \
functional/cli_user/misc/zpool_import_001_neg.ksh \
functional/cli_user/misc/zpool_import_002_neg.ksh \
functional/cli_user/misc/zpool_offline_001_neg.ksh \
functional/cli_user/misc/zpool_online_001_neg.ksh \
functional/cli_user/misc/zpool_remove_001_neg.ksh \
functional/cli_user/misc/zpool_replace_001_neg.ksh \
functional/cli_user/misc/zpool_scrub_001_neg.ksh \
functional/cli_user/misc/zpool_set_001_neg.ksh \
functional/cli_user/misc/zpool_status_001_neg.ksh \
functional/cli_user/misc/zpool_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_wait_privilege.ksh \
functional/cli_user/zfs_list/cleanup.ksh \
functional/cli_user/zfs_list/setup.ksh \
functional/cli_user/zfs_list/zfs_list_001_pos.ksh \
functional/cli_user/zfs_list/zfs_list_002_pos.ksh \
functional/cli_user/zfs_list/zfs_list_003_pos.ksh \
functional/cli_user/zfs_list/zfs_list_004_neg.ksh \
functional/cli_user/zfs_list/zfs_list_005_neg.ksh \
functional/cli_user/zfs_list/zfs_list_007_pos.ksh \
functional/cli_user/zfs_list/zfs_list_008_neg.ksh \
functional/cli_user/zpool_iostat/cleanup.ksh \
functional/cli_user/zpool_iostat/setup.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_002_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_003_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_004_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_disable.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh \
functional/cli_user/zpool_list/cleanup.ksh \
functional/cli_user/zpool_list/setup.ksh \
functional/cli_user/zpool_list/zpool_list_001_pos.ksh \
functional/cli_user/zpool_list/zpool_list_002_neg.ksh \
functional/cli_user/zpool_status/cleanup.ksh \
functional/cli_user/zpool_status/setup.ksh \
functional/cli_user/zpool_status/zpool_status_003_pos.ksh \
functional/cli_user/zpool_status/zpool_status_-c_disable.ksh \
functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh \
functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh \
functional/compression/cleanup.ksh \
functional/compression/compress_001_pos.ksh \
functional/compression/compress_002_pos.ksh \
functional/compression/compress_003_pos.ksh \
functional/compression/compress_004_pos.ksh \
functional/compression/compress_zstd_bswap.ksh \
functional/compression/l2arc_compressed_arc_disabled.ksh \
functional/compression/l2arc_compressed_arc.ksh \
functional/compression/l2arc_encrypted.ksh \
functional/compression/l2arc_encrypted_no_compressed_arc.ksh \
functional/compression/setup.ksh \
functional/cp_files/cleanup.ksh \
functional/cp_files/cp_files_001_pos.ksh \
functional/cp_files/cp_files_002_pos.ksh \
functional/cp_files/cp_stress.ksh \
functional/cp_files/setup.ksh \
functional/crtime/cleanup.ksh \
functional/crtime/crtime_001_pos.ksh \
functional/crtime/setup.ksh \
functional/ctime/cleanup.ksh \
functional/ctime/ctime_001_pos.ksh \
functional/ctime/setup.ksh \
functional/deadman/deadman_ratelimit.ksh \
functional/deadman/deadman_sync.ksh \
functional/deadman/deadman_zio.ksh \
functional/dedup/cleanup.ksh \
functional/dedup/setup.ksh \
functional/dedup/dedup_fdt_create.ksh \
functional/dedup/dedup_fdt_import.ksh \
functional/dedup/dedup_legacy_create.ksh \
functional/dedup/dedup_legacy_import.ksh \
functional/dedup/dedup_legacy_fdt_upgrade.ksh \
functional/dedup/dedup_legacy_fdt_mixed.ksh \
functional/dedup/dedup_quota.ksh \
functional/delegate/cleanup.ksh \
functional/delegate/setup.ksh \
functional/delegate/zfs_allow_001_pos.ksh \
functional/delegate/zfs_allow_002_pos.ksh \
functional/delegate/zfs_allow_003_pos.ksh \
functional/delegate/zfs_allow_004_pos.ksh \
functional/delegate/zfs_allow_005_pos.ksh \
functional/delegate/zfs_allow_006_pos.ksh \
functional/delegate/zfs_allow_007_pos.ksh \
functional/delegate/zfs_allow_008_pos.ksh \
functional/delegate/zfs_allow_009_neg.ksh \
functional/delegate/zfs_allow_010_pos.ksh \
functional/delegate/zfs_allow_011_neg.ksh \
functional/delegate/zfs_allow_012_neg.ksh \
functional/delegate/zfs_unallow_001_pos.ksh \
functional/delegate/zfs_unallow_002_pos.ksh \
functional/delegate/zfs_unallow_003_pos.ksh \
functional/delegate/zfs_unallow_004_pos.ksh \
functional/delegate/zfs_unallow_005_pos.ksh \
functional/delegate/zfs_unallow_006_pos.ksh \
functional/delegate/zfs_unallow_007_neg.ksh \
functional/delegate/zfs_unallow_008_neg.ksh \
functional/devices/cleanup.ksh \
functional/devices/devices_001_pos.ksh \
functional/devices/devices_002_neg.ksh \
functional/devices/devices_003_pos.ksh \
functional/devices/setup.ksh \
functional/direct/dio_aligned_block.ksh \
functional/direct/dio_async_always.ksh \
functional/direct/dio_async_fio_ioengines.ksh \
functional/direct/dio_compression.ksh \
functional/direct/dio_dedup.ksh \
functional/direct/dio_encryption.ksh \
functional/direct/dio_grow_block.ksh \
+ functional/direct/dio_loopback_dev.ksh \
functional/direct/dio_max_recordsize.ksh \
functional/direct/dio_mixed.ksh \
functional/direct/dio_mmap.ksh \
functional/direct/dio_overwrites.ksh \
functional/direct/dio_property.ksh \
functional/direct/dio_random.ksh \
functional/direct/dio_read_verify.ksh \
functional/direct/dio_recordsize.ksh \
functional/direct/dio_unaligned_block.ksh \
functional/direct/dio_unaligned_filesize.ksh \
functional/direct/dio_write_verify.ksh \
functional/direct/dio_write_stable_pages.ksh \
functional/direct/setup.ksh \
functional/direct/cleanup.ksh \
functional/dos_attributes/cleanup.ksh \
functional/dos_attributes/read_dos_attrs_001.ksh \
functional/dos_attributes/setup.ksh \
functional/dos_attributes/write_dos_attrs_001.ksh \
functional/events/cleanup.ksh \
functional/events/events_001_pos.ksh \
functional/events/events_002_pos.ksh \
functional/events/setup.ksh \
functional/events/zed_cksum_config.ksh \
functional/events/zed_cksum_reported.ksh \
functional/events/zed_diagnose_multiple.ksh \
functional/events/zed_fd_spill.ksh \
functional/events/zed_io_config.ksh \
functional/events/zed_rc_filter.ksh \
functional/events/zed_slow_io.ksh \
functional/events/zed_slow_io_many_vdevs.ksh \
functional/exec/cleanup.ksh \
functional/exec/exec_001_pos.ksh \
functional/exec/exec_002_neg.ksh \
functional/exec/setup.ksh \
functional/fadvise/cleanup.ksh \
functional/fadvise/fadvise_sequential.ksh \
functional/fadvise/setup.ksh \
functional/fallocate/cleanup.ksh \
functional/fallocate/fallocate_prealloc.ksh \
functional/fallocate/fallocate_punch-hole.ksh \
functional/fallocate/fallocate_zero-range.ksh \
functional/fallocate/setup.ksh \
functional/fault/auto_offline_001_pos.ksh \
functional/fault/auto_online_001_pos.ksh \
functional/fault/auto_online_002_pos.ksh \
functional/fault/auto_replace_001_pos.ksh \
functional/fault/auto_replace_002_pos.ksh \
functional/fault/auto_spare_001_pos.ksh \
functional/fault/auto_spare_002_pos.ksh \
functional/fault/auto_spare_ashift.ksh \
functional/fault/auto_spare_multiple.ksh \
functional/fault/auto_spare_shared.ksh \
functional/fault/cleanup.ksh \
functional/fault/decompress_fault.ksh \
functional/fault/decrypt_fault.ksh \
functional/fault/fault_limits.ksh \
functional/fault/scrub_after_resilver.ksh \
functional/fault/suspend_on_probe_errors.ksh \
functional/fault/suspend_resume_single.ksh \
functional/fault/setup.ksh \
functional/fault/zpool_status_-s.ksh \
functional/features/async_destroy/async_destroy_001_pos.ksh \
functional/features/async_destroy/cleanup.ksh \
functional/features/async_destroy/setup.ksh \
functional/features/large_dnode/cleanup.ksh \
functional/features/large_dnode/large_dnode_001_pos.ksh \
functional/features/large_dnode/large_dnode_002_pos.ksh \
functional/features/large_dnode/large_dnode_003_pos.ksh \
functional/features/large_dnode/large_dnode_004_neg.ksh \
functional/features/large_dnode/large_dnode_005_pos.ksh \
functional/features/large_dnode/large_dnode_006_pos.ksh \
functional/features/large_dnode/large_dnode_007_neg.ksh \
functional/features/large_dnode/large_dnode_008_pos.ksh \
functional/features/large_dnode/large_dnode_009_pos.ksh \
functional/features/large_dnode/setup.ksh \
functional/grow/grow_pool_001_pos.ksh \
functional/grow/grow_replicas_001_pos.ksh \
functional/history/cleanup.ksh \
functional/history/history_001_pos.ksh \
functional/history/history_002_pos.ksh \
functional/history/history_003_pos.ksh \
functional/history/history_004_pos.ksh \
functional/history/history_005_neg.ksh \
functional/history/history_006_neg.ksh \
functional/history/history_007_pos.ksh \
functional/history/history_008_pos.ksh \
functional/history/history_009_pos.ksh \
functional/history/history_010_pos.ksh \
functional/history/setup.ksh \
functional/inheritance/cleanup.ksh \
functional/inheritance/inherit_001_pos.ksh \
functional/inuse/inuse_001_pos.ksh \
functional/inuse/inuse_003_pos.ksh \
functional/inuse/inuse_004_pos.ksh \
functional/inuse/inuse_005_pos.ksh \
functional/inuse/inuse_006_pos.ksh \
functional/inuse/inuse_007_pos.ksh \
functional/inuse/inuse_008_pos.ksh \
functional/inuse/inuse_009_pos.ksh \
functional/inuse/setup.ksh \
functional/io/cleanup.ksh \
functional/io/io_uring.ksh \
functional/io/libaio.ksh \
functional/io/mmap.ksh \
functional/io/posixaio.ksh \
functional/io/psync.ksh \
functional/io/setup.ksh \
functional/io/sync.ksh \
functional/l2arc/cleanup.ksh \
functional/l2arc/l2arc_arcstats_pos.ksh \
functional/l2arc/l2arc_l2miss_pos.ksh \
functional/l2arc/l2arc_mfuonly_pos.ksh \
functional/l2arc/persist_l2arc_001_pos.ksh \
functional/l2arc/persist_l2arc_002_pos.ksh \
functional/l2arc/persist_l2arc_003_neg.ksh \
functional/l2arc/persist_l2arc_004_pos.ksh \
functional/l2arc/persist_l2arc_005_pos.ksh \
functional/l2arc/setup.ksh \
functional/large_files/cleanup.ksh \
functional/large_files/large_files_001_pos.ksh \
functional/large_files/large_files_002_pos.ksh \
functional/large_files/setup.ksh \
functional/largest_pool/largest_pool_001_pos.ksh \
functional/libzfs/cleanup.ksh \
functional/libzfs/libzfs_input.ksh \
functional/libzfs/setup.ksh \
functional/limits/cleanup.ksh \
functional/limits/filesystem_count.ksh \
functional/limits/filesystem_limit.ksh \
functional/limits/setup.ksh \
functional/limits/snapshot_count.ksh \
functional/limits/snapshot_limit.ksh \
functional/link_count/cleanup.ksh \
functional/link_count/link_count_001.ksh \
functional/link_count/link_count_root_inode.ksh \
functional/link_count/setup.ksh \
functional/longname/cleanup.ksh \
functional/longname/longname_001_pos.ksh \
functional/longname/longname_002_pos.ksh \
functional/longname/longname_003_pos.ksh \
functional/longname/setup.ksh \
functional/log_spacemap/log_spacemap_import_logs.ksh \
functional/migration/cleanup.ksh \
functional/migration/migration_001_pos.ksh \
functional/migration/migration_002_pos.ksh \
functional/migration/migration_003_pos.ksh \
functional/migration/migration_004_pos.ksh \
functional/migration/migration_005_pos.ksh \
functional/migration/migration_006_pos.ksh \
functional/migration/migration_007_pos.ksh \
functional/migration/migration_008_pos.ksh \
functional/migration/migration_009_pos.ksh \
functional/migration/migration_010_pos.ksh \
functional/migration/migration_011_pos.ksh \
functional/migration/migration_012_pos.ksh \
functional/migration/setup.ksh \
functional/mmap/cleanup.ksh \
functional/mmap/mmap_libaio_001_pos.ksh \
functional/mmap/mmap_mixed.ksh \
functional/mmap/mmap_read_001_pos.ksh \
functional/mmap/mmap_seek_001_pos.ksh \
functional/mmap/mmap_sync_001_pos.ksh \
functional/mmap/mmap_write_001_pos.ksh \
functional/mmap/setup.ksh \
functional/mmp/cleanup.ksh \
functional/mmp/mmp_active_import.ksh \
functional/mmp/mmp_exported_import.ksh \
functional/mmp/mmp_hostid.ksh \
functional/mmp/mmp_inactive_import.ksh \
functional/mmp/mmp_interval.ksh \
functional/mmp/mmp_on_off.ksh \
functional/mmp/mmp_on_thread.ksh \
functional/mmp/mmp_on_uberblocks.ksh \
functional/mmp/mmp_on_zdb.ksh \
functional/mmp/mmp_reset_interval.ksh \
functional/mmp/mmp_write_distribution.ksh \
functional/mmp/mmp_write_slow_disk.ksh \
functional/mmp/mmp_write_uberblocks.ksh \
functional/mmp/multihost_history.ksh \
functional/mmp/setup.ksh \
functional/mount/cleanup.ksh \
functional/mount/setup.ksh \
functional/mount/umount_001.ksh \
functional/mount/umountall_001.ksh \
functional/mount/umount_unlinked_drain.ksh \
functional/mv_files/cleanup.ksh \
functional/mv_files/mv_files_001_pos.ksh \
functional/mv_files/mv_files_002_pos.ksh \
functional/mv_files/random_creation.ksh \
functional/mv_files/setup.ksh \
functional/nestedfs/cleanup.ksh \
functional/nestedfs/nestedfs_001_pos.ksh \
functional/nestedfs/setup.ksh \
functional/nopwrite/cleanup.ksh \
functional/nopwrite/nopwrite_copies.ksh \
functional/nopwrite/nopwrite_mtime.ksh \
functional/nopwrite/nopwrite_negative.ksh \
functional/nopwrite/nopwrite_promoted_clone.ksh \
functional/nopwrite/nopwrite_recsize.ksh \
functional/nopwrite/nopwrite_sync.ksh \
functional/nopwrite/nopwrite_varying_compression.ksh \
functional/nopwrite/nopwrite_volume.ksh \
functional/nopwrite/setup.ksh \
functional/no_space/cleanup.ksh \
functional/no_space/enospc_001_pos.ksh \
functional/no_space/enospc_002_pos.ksh \
functional/no_space/enospc_003_pos.ksh \
functional/no_space/enospc_df.ksh \
functional/no_space/enospc_ganging.ksh \
functional/no_space/enospc_rm.ksh \
functional/no_space/setup.ksh \
functional/online_offline/cleanup.ksh \
functional/online_offline/online_offline_001_pos.ksh \
functional/online_offline/online_offline_002_neg.ksh \
functional/online_offline/online_offline_003_neg.ksh \
functional/online_offline/setup.ksh \
functional/pam/cleanup.ksh \
functional/pam/pam_basic.ksh \
functional/pam/pam_change_unmounted.ksh \
+ functional/pam/pam_mount_recursively.ksh \
functional/pam/pam_nounmount.ksh \
functional/pam/pam_recursive.ksh \
functional/pam/pam_short_password.ksh \
functional/pam/setup.ksh \
functional/pool_checkpoint/checkpoint_after_rewind.ksh \
functional/pool_checkpoint/checkpoint_big_rewind.ksh \
functional/pool_checkpoint/checkpoint_capacity.ksh \
functional/pool_checkpoint/checkpoint_conf_change.ksh \
functional/pool_checkpoint/checkpoint_discard_busy.ksh \
functional/pool_checkpoint/checkpoint_discard.ksh \
functional/pool_checkpoint/checkpoint_discard_many.ksh \
functional/pool_checkpoint/checkpoint_indirect.ksh \
functional/pool_checkpoint/checkpoint_invalid.ksh \
functional/pool_checkpoint/checkpoint_lun_expsz.ksh \
functional/pool_checkpoint/checkpoint_open.ksh \
functional/pool_checkpoint/checkpoint_removal.ksh \
functional/pool_checkpoint/checkpoint_rewind.ksh \
functional/pool_checkpoint/checkpoint_ro_rewind.ksh \
functional/pool_checkpoint/checkpoint_sm_scale.ksh \
functional/pool_checkpoint/checkpoint_twice.ksh \
functional/pool_checkpoint/checkpoint_vdev_add.ksh \
functional/pool_checkpoint/checkpoint_zdb.ksh \
functional/pool_checkpoint/checkpoint_zhack_feat.ksh \
functional/pool_checkpoint/cleanup.ksh \
functional/pool_checkpoint/setup.ksh \
functional/pool_names/pool_names_001_pos.ksh \
functional/pool_names/pool_names_002_neg.ksh \
functional/poolversion/cleanup.ksh \
functional/poolversion/poolversion_001_pos.ksh \
functional/poolversion/poolversion_002_pos.ksh \
functional/poolversion/setup.ksh \
functional/privilege/cleanup.ksh \
functional/privilege/privilege_001_pos.ksh \
functional/privilege/privilege_002_pos.ksh \
functional/privilege/setup.ksh \
functional/procfs/cleanup.ksh \
functional/procfs/pool_state.ksh \
functional/procfs/procfs_list_basic.ksh \
functional/procfs/procfs_list_concurrent_readers.ksh \
functional/procfs/procfs_list_stale_read.ksh \
functional/procfs/setup.ksh \
functional/projectquota/cleanup.ksh \
functional/projectquota/projectid_001_pos.ksh \
functional/projectquota/projectid_002_pos.ksh \
functional/projectquota/projectid_003_pos.ksh \
functional/projectquota/projectquota_001_pos.ksh \
functional/projectquota/projectquota_002_pos.ksh \
functional/projectquota/projectquota_003_pos.ksh \
functional/projectquota/projectquota_004_neg.ksh \
functional/projectquota/projectquota_005_pos.ksh \
functional/projectquota/projectquota_006_pos.ksh \
functional/projectquota/projectquota_007_pos.ksh \
functional/projectquota/projectquota_008_pos.ksh \
functional/projectquota/projectquota_009_pos.ksh \
functional/projectquota/projectspace_001_pos.ksh \
functional/projectquota/projectspace_002_pos.ksh \
functional/projectquota/projectspace_003_pos.ksh \
functional/projectquota/projectspace_004_pos.ksh \
functional/projectquota/projecttree_001_pos.ksh \
functional/projectquota/projecttree_002_pos.ksh \
functional/projectquota/projecttree_003_neg.ksh \
functional/projectquota/setup.ksh \
functional/quota/cleanup.ksh \
functional/quota/quota_001_pos.ksh \
functional/quota/quota_002_pos.ksh \
functional/quota/quota_003_pos.ksh \
functional/quota/quota_004_pos.ksh \
functional/quota/quota_005_pos.ksh \
functional/quota/quota_006_neg.ksh \
functional/quota/setup.ksh \
functional/raidz/cleanup.ksh \
functional/raidz/raidz_001_neg.ksh \
functional/raidz/raidz_002_pos.ksh \
functional/raidz/raidz_expand_001_pos.ksh \
functional/raidz/raidz_expand_002_pos.ksh \
functional/raidz/raidz_expand_003_neg.ksh \
functional/raidz/raidz_expand_003_pos.ksh \
functional/raidz/raidz_expand_004_pos.ksh \
functional/raidz/raidz_expand_005_pos.ksh \
functional/raidz/raidz_expand_006_neg.ksh \
functional/raidz/raidz_expand_007_neg.ksh \
functional/raidz/setup.ksh \
functional/redacted_send/cleanup.ksh \
functional/redacted_send/redacted_compressed.ksh \
functional/redacted_send/redacted_contents.ksh \
functional/redacted_send/redacted_deleted.ksh \
functional/redacted_send/redacted_disabled_feature.ksh \
functional/redacted_send/redacted_embedded.ksh \
functional/redacted_send/redacted_holes.ksh \
functional/redacted_send/redacted_incrementals.ksh \
functional/redacted_send/redacted_largeblocks.ksh \
functional/redacted_send/redacted_many_clones.ksh \
functional/redacted_send/redacted_mixed_recsize.ksh \
functional/redacted_send/redacted_mounts.ksh \
functional/redacted_send/redacted_negative.ksh \
functional/redacted_send/redacted_origin.ksh \
functional/redacted_send/redacted_panic.ksh \
functional/redacted_send/redacted_props.ksh \
functional/redacted_send/redacted_resume.ksh \
functional/redacted_send/redacted_size.ksh \
functional/redacted_send/redacted_volume.ksh \
functional/redacted_send/setup.ksh \
functional/redundancy/cleanup.ksh \
functional/redundancy/redundancy_draid1.ksh \
functional/redundancy/redundancy_draid2.ksh \
functional/redundancy/redundancy_draid3.ksh \
functional/redundancy/redundancy_draid_damaged1.ksh \
functional/redundancy/redundancy_draid_damaged2.ksh \
functional/redundancy/redundancy_draid.ksh \
functional/redundancy/redundancy_draid_spare1.ksh \
functional/redundancy/redundancy_draid_spare2.ksh \
functional/redundancy/redundancy_draid_spare3.ksh \
functional/redundancy/redundancy_mirror.ksh \
functional/redundancy/redundancy_raidz1.ksh \
functional/redundancy/redundancy_raidz2.ksh \
functional/redundancy/redundancy_raidz3.ksh \
functional/redundancy/redundancy_raidz.ksh \
functional/redundancy/redundancy_stripe.ksh \
functional/redundancy/setup.ksh \
functional/refquota/cleanup.ksh \
functional/refquota/refquota_001_pos.ksh \
functional/refquota/refquota_002_pos.ksh \
functional/refquota/refquota_003_pos.ksh \
functional/refquota/refquota_004_pos.ksh \
functional/refquota/refquota_005_pos.ksh \
functional/refquota/refquota_006_neg.ksh \
functional/refquota/refquota_007_neg.ksh \
functional/refquota/refquota_008_neg.ksh \
functional/refquota/setup.ksh \
functional/refreserv/cleanup.ksh \
functional/refreserv/refreserv_001_pos.ksh \
functional/refreserv/refreserv_002_pos.ksh \
functional/refreserv/refreserv_003_pos.ksh \
functional/refreserv/refreserv_004_pos.ksh \
functional/refreserv/refreserv_005_pos.ksh \
functional/refreserv/refreserv_multi_raidz.ksh \
functional/refreserv/refreserv_raidz.ksh \
functional/refreserv/setup.ksh \
functional/removal/cleanup.ksh \
functional/removal/removal_all_vdev.ksh \
functional/removal/removal_cancel.ksh \
functional/removal/removal_check_space.ksh \
functional/removal/removal_condense_export.ksh \
functional/removal/removal_multiple_indirection.ksh \
functional/removal/removal_nopwrite.ksh \
functional/removal/removal_remap_deadlists.ksh \
functional/removal/removal_reservation.ksh \
functional/removal/removal_resume_export.ksh \
functional/removal/removal_sanity.ksh \
functional/removal/removal_with_add.ksh \
functional/removal/removal_with_create_fs.ksh \
functional/removal/removal_with_dedup.ksh \
functional/removal/removal_with_errors.ksh \
functional/removal/removal_with_export.ksh \
functional/removal/removal_with_faulted.ksh \
functional/removal/removal_with_ganging.ksh \
functional/removal/removal_with_indirect.ksh \
functional/removal/removal_with_remove.ksh \
functional/removal/removal_with_scrub.ksh \
functional/removal/removal_with_send.ksh \
functional/removal/removal_with_send_recv.ksh \
functional/removal/removal_with_snapshot.ksh \
functional/removal/removal_with_write.ksh \
functional/removal/removal_with_zdb.ksh \
functional/removal/remove_attach_mirror.ksh \
functional/removal/remove_expanded.ksh \
functional/removal/remove_indirect.ksh \
functional/removal/remove_mirror.ksh \
functional/removal/remove_mirror_sanity.ksh \
functional/removal/remove_raidz.ksh \
functional/rename_dirs/cleanup.ksh \
functional/rename_dirs/rename_dirs_001_pos.ksh \
functional/rename_dirs/setup.ksh \
functional/renameat2/cleanup.ksh \
functional/renameat2/setup.ksh \
functional/renameat2/renameat2_exchange.ksh \
functional/renameat2/renameat2_noreplace.ksh \
functional/renameat2/renameat2_whiteout.ksh \
functional/replacement/attach_import.ksh \
functional/replacement/attach_multiple.ksh \
functional/replacement/attach_rebuild.ksh \
functional/replacement/attach_resilver.ksh \
functional/replacement/cleanup.ksh \
functional/replacement/detach.ksh \
functional/replacement/rebuild_disabled_feature.ksh \
functional/replacement/rebuild_multiple.ksh \
functional/replacement/rebuild_raidz.ksh \
functional/replacement/replace_import.ksh \
functional/replacement/replace_rebuild.ksh \
functional/replacement/replace_resilver.ksh \
functional/replacement/resilver_restart_001.ksh \
functional/replacement/resilver_restart_002.ksh \
functional/replacement/scrub_cancel.ksh \
functional/replacement/setup.ksh \
functional/reservation/cleanup.ksh \
functional/reservation/reservation_001_pos.ksh \
functional/reservation/reservation_002_pos.ksh \
functional/reservation/reservation_003_pos.ksh \
functional/reservation/reservation_004_pos.ksh \
functional/reservation/reservation_005_pos.ksh \
functional/reservation/reservation_006_pos.ksh \
functional/reservation/reservation_007_pos.ksh \
functional/reservation/reservation_008_pos.ksh \
functional/reservation/reservation_009_pos.ksh \
functional/reservation/reservation_010_pos.ksh \
functional/reservation/reservation_011_pos.ksh \
functional/reservation/reservation_012_pos.ksh \
functional/reservation/reservation_013_pos.ksh \
functional/reservation/reservation_014_pos.ksh \
functional/reservation/reservation_015_pos.ksh \
functional/reservation/reservation_016_pos.ksh \
functional/reservation/reservation_017_pos.ksh \
functional/reservation/reservation_018_pos.ksh \
functional/reservation/reservation_019_pos.ksh \
functional/reservation/reservation_020_pos.ksh \
functional/reservation/reservation_021_neg.ksh \
functional/reservation/reservation_022_pos.ksh \
functional/reservation/setup.ksh \
functional/rootpool/cleanup.ksh \
functional/rootpool/rootpool_002_neg.ksh \
functional/rootpool/rootpool_003_neg.ksh \
functional/rootpool/rootpool_007_pos.ksh \
functional/rootpool/setup.ksh \
functional/rsend/cleanup.ksh \
functional/rsend/recv_dedup_encrypted_zvol.ksh \
functional/rsend/recv_dedup.ksh \
functional/rsend/rsend_001_pos.ksh \
functional/rsend/rsend_002_pos.ksh \
functional/rsend/rsend_003_pos.ksh \
functional/rsend/rsend_004_pos.ksh \
functional/rsend/rsend_005_pos.ksh \
functional/rsend/rsend_006_pos.ksh \
functional/rsend/rsend_007_pos.ksh \
functional/rsend/rsend_008_pos.ksh \
functional/rsend/rsend_009_pos.ksh \
functional/rsend/rsend_010_pos.ksh \
functional/rsend/rsend_011_pos.ksh \
functional/rsend/rsend_012_pos.ksh \
functional/rsend/rsend_013_pos.ksh \
functional/rsend/rsend_014_pos.ksh \
functional/rsend/rsend_016_neg.ksh \
functional/rsend/rsend_019_pos.ksh \
functional/rsend/rsend_020_pos.ksh \
functional/rsend/rsend_021_pos.ksh \
functional/rsend/rsend_022_pos.ksh \
functional/rsend/rsend_024_pos.ksh \
functional/rsend/rsend_025_pos.ksh \
functional/rsend/rsend_026_neg.ksh \
functional/rsend/rsend_027_pos.ksh \
functional/rsend/rsend_028_neg.ksh \
functional/rsend/rsend_029_neg.ksh \
functional/rsend/rsend_030_pos.ksh \
functional/rsend/rsend_031_pos.ksh \
functional/rsend/send-c_embedded_blocks.ksh \
functional/rsend/send-c_incremental.ksh \
functional/rsend/send-c_longname.ksh \
functional/rsend/send-c_lz4_disabled.ksh \
functional/rsend/send-c_mixed_compression.ksh \
functional/rsend/send-c_props.ksh \
functional/rsend/send-c_recv_dedup.ksh \
functional/rsend/send-c_recv_lz4_disabled.ksh \
functional/rsend/send-c_resume.ksh \
functional/rsend/send-c_stream_size_estimate.ksh \
functional/rsend/send-c_verify_contents.ksh \
functional/rsend/send-c_verify_ratio.ksh \
functional/rsend/send-c_volume.ksh \
functional/rsend/send-c_zstream_recompress.ksh \
functional/rsend/send-c_zstreamdump.ksh \
functional/rsend/send-cpL_varied_recsize.ksh \
functional/rsend/send_doall.ksh \
functional/rsend/send_encrypted_incremental.ksh \
functional/rsend/send_encrypted_files.ksh \
functional/rsend/send_encrypted_freeobjects.ksh \
functional/rsend/send_encrypted_hierarchy.ksh \
functional/rsend/send_encrypted_props.ksh \
functional/rsend/send_encrypted_truncated_files.ksh \
functional/rsend/send_freeobjects.ksh \
functional/rsend/send_holds.ksh \
functional/rsend/send_hole_birth.ksh \
functional/rsend/send_invalid.ksh \
functional/rsend/send-L_toggle.ksh \
functional/rsend/send_mixed_raw.ksh \
functional/rsend/send_partial_dataset.ksh \
functional/rsend/send_raw_ashift.ksh \
functional/rsend/send_raw_spill_block.ksh \
functional/rsend/send_raw_large_blocks.ksh \
functional/rsend/send_realloc_dnode_size.ksh \
functional/rsend/send_realloc_encrypted_files.ksh \
functional/rsend/send_realloc_files.ksh \
functional/rsend/send_spill_block.ksh \
functional/rsend/send-wR_encrypted_zvol.ksh \
functional/rsend/setup.ksh \
functional/scrub_mirror/cleanup.ksh \
functional/scrub_mirror/scrub_mirror_001_pos.ksh \
functional/scrub_mirror/scrub_mirror_002_pos.ksh \
functional/scrub_mirror/scrub_mirror_003_pos.ksh \
functional/scrub_mirror/scrub_mirror_004_pos.ksh \
functional/scrub_mirror/setup.ksh \
functional/slog/cleanup.ksh \
functional/slog/setup.ksh \
functional/slog/slog_001_pos.ksh \
functional/slog/slog_002_pos.ksh \
functional/slog/slog_003_pos.ksh \
functional/slog/slog_004_pos.ksh \
functional/slog/slog_005_pos.ksh \
functional/slog/slog_006_pos.ksh \
functional/slog/slog_007_pos.ksh \
functional/slog/slog_008_neg.ksh \
functional/slog/slog_009_neg.ksh \
functional/slog/slog_010_neg.ksh \
functional/slog/slog_011_neg.ksh \
functional/slog/slog_012_neg.ksh \
functional/slog/slog_013_pos.ksh \
functional/slog/slog_014_pos.ksh \
functional/slog/slog_015_neg.ksh \
functional/slog/slog_016_pos.ksh \
functional/slog/slog_replay_fs_001.ksh \
functional/slog/slog_replay_fs_002.ksh \
functional/slog/slog_replay_volume.ksh \
functional/snapshot/cleanup.ksh \
functional/snapshot/clone_001_pos.ksh \
functional/snapshot/rollback_001_pos.ksh \
functional/snapshot/rollback_002_pos.ksh \
functional/snapshot/rollback_003_pos.ksh \
functional/snapshot/setup.ksh \
functional/snapshot/snapshot_001_pos.ksh \
functional/snapshot/snapshot_002_pos.ksh \
functional/snapshot/snapshot_003_pos.ksh \
functional/snapshot/snapshot_004_pos.ksh \
functional/snapshot/snapshot_005_pos.ksh \
functional/snapshot/snapshot_006_pos.ksh \
functional/snapshot/snapshot_007_pos.ksh \
functional/snapshot/snapshot_008_pos.ksh \
functional/snapshot/snapshot_009_pos.ksh \
functional/snapshot/snapshot_010_pos.ksh \
functional/snapshot/snapshot_011_pos.ksh \
functional/snapshot/snapshot_012_pos.ksh \
functional/snapshot/snapshot_013_pos.ksh \
functional/snapshot/snapshot_014_pos.ksh \
functional/snapshot/snapshot_015_pos.ksh \
functional/snapshot/snapshot_016_pos.ksh \
functional/snapshot/snapshot_017_pos.ksh \
functional/snapshot/snapshot_018_pos.ksh \
functional/snapused/cleanup.ksh \
functional/snapused/setup.ksh \
functional/snapused/snapused_001_pos.ksh \
functional/snapused/snapused_002_pos.ksh \
functional/snapused/snapused_003_pos.ksh \
functional/snapused/snapused_004_pos.ksh \
functional/snapused/snapused_005_pos.ksh \
functional/sparse/cleanup.ksh \
functional/sparse/setup.ksh \
functional/sparse/sparse_001_pos.ksh \
functional/stat/cleanup.ksh \
functional/stat/setup.ksh \
functional/stat/stat_001_pos.ksh \
functional/suid/cleanup.ksh \
functional/suid/setup.ksh \
functional/suid/suid_write_to_none.ksh \
functional/suid/suid_write_to_sgid.ksh \
functional/suid/suid_write_to_suid.ksh \
functional/suid/suid_write_to_suid_sgid.ksh \
functional/suid/suid_write_zil_replay.ksh \
functional/trim/autotrim_config.ksh \
functional/trim/autotrim_integrity.ksh \
functional/trim/autotrim_trim_integrity.ksh \
functional/trim/cleanup.ksh \
functional/trim/setup.ksh \
functional/trim/trim_config.ksh \
functional/trim/trim_integrity.ksh \
functional/trim/trim_l2arc.ksh \
functional/truncate/cleanup.ksh \
functional/truncate/setup.ksh \
functional/truncate/truncate_001_pos.ksh \
functional/truncate/truncate_002_pos.ksh \
functional/truncate/truncate_timestamps.ksh \
functional/upgrade/cleanup.ksh \
functional/upgrade/setup.ksh \
functional/upgrade/upgrade_projectquota_001_pos.ksh \
functional/upgrade/upgrade_projectquota_002_pos.ksh \
functional/upgrade/upgrade_readonly_pool.ksh \
functional/upgrade/upgrade_userobj_001_pos.ksh \
functional/user_namespace/cleanup.ksh \
functional/user_namespace/setup.ksh \
functional/user_namespace/user_namespace_001.ksh \
functional/user_namespace/user_namespace_002.ksh \
functional/user_namespace/user_namespace_003.ksh \
functional/user_namespace/user_namespace_004.ksh \
functional/userquota/cleanup.ksh \
functional/userquota/groupspace_001_pos.ksh \
functional/userquota/groupspace_002_pos.ksh \
functional/userquota/groupspace_003_pos.ksh \
functional/userquota/setup.ksh \
functional/userquota/userquota_001_pos.ksh \
functional/userquota/userquota_002_pos.ksh \
functional/userquota/userquota_003_pos.ksh \
functional/userquota/userquota_004_pos.ksh \
functional/userquota/userquota_005_neg.ksh \
functional/userquota/userquota_006_pos.ksh \
functional/userquota/userquota_007_pos.ksh \
functional/userquota/userquota_008_pos.ksh \
functional/userquota/userquota_009_pos.ksh \
functional/userquota/userquota_010_pos.ksh \
functional/userquota/userquota_011_pos.ksh \
functional/userquota/userquota_012_neg.ksh \
functional/userquota/userquota_013_pos.ksh \
functional/userquota/userspace_001_pos.ksh \
functional/userquota/userspace_002_pos.ksh \
functional/userquota/userspace_003_pos.ksh \
functional/userquota/userspace_encrypted.ksh \
functional/userquota/userspace_send_encrypted.ksh \
functional/userquota/userspace_encrypted_13709.ksh \
functional/vdev_zaps/cleanup.ksh \
functional/vdev_zaps/setup.ksh \
functional/vdev_zaps/vdev_zaps_001_pos.ksh \
functional/vdev_zaps/vdev_zaps_002_pos.ksh \
functional/vdev_zaps/vdev_zaps_003_pos.ksh \
functional/vdev_zaps/vdev_zaps_004_pos.ksh \
functional/vdev_zaps/vdev_zaps_005_pos.ksh \
functional/vdev_zaps/vdev_zaps_006_pos.ksh \
functional/vdev_zaps/vdev_zaps_007_pos.ksh \
functional/write_dirs/cleanup.ksh \
functional/write_dirs/setup.ksh \
functional/write_dirs/write_dirs_001_pos.ksh \
functional/write_dirs/write_dirs_002_pos.ksh \
functional/xattr/cleanup.ksh \
functional/xattr/setup.ksh \
functional/xattr/xattr_001_pos.ksh \
functional/xattr/xattr_002_neg.ksh \
functional/xattr/xattr_003_neg.ksh \
functional/xattr/xattr_004_pos.ksh \
functional/xattr/xattr_005_pos.ksh \
functional/xattr/xattr_006_pos.ksh \
functional/xattr/xattr_007_neg.ksh \
functional/xattr/xattr_008_pos.ksh \
functional/xattr/xattr_009_neg.ksh \
functional/xattr/xattr_010_neg.ksh \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zap_shrink/cleanup.ksh \
functional/zap_shrink/zap_shrink_001_pos.ksh \
functional/zap_shrink/setup.ksh \
functional/zpool_influxdb/cleanup.ksh \
functional/zpool_influxdb/setup.ksh \
functional/zpool_influxdb/zpool_influxdb.ksh \
functional/zvol/zvol_cli/cleanup.ksh \
functional/zvol/zvol_cli/setup.ksh \
functional/zvol/zvol_cli/zvol_cli_001_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_002_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_003_neg.ksh \
functional/zvol/zvol_ENOSPC/cleanup.ksh \
functional/zvol/zvol_ENOSPC/setup.ksh \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh \
functional/zvol/zvol_misc/cleanup.ksh \
functional/zvol/zvol_misc/setup.ksh \
functional/zvol/zvol_misc/zvol_misc_001_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_002_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_003_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_004_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_005_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_006_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_fua.ksh \
functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh \
functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh \
functional/zvol/zvol_misc/zvol_misc_snapdev.ksh \
functional/zvol/zvol_misc/zvol_misc_trim.ksh \
functional/zvol/zvol_misc/zvol_misc_volmode.ksh \
functional/zvol/zvol_misc/zvol_misc_zil.ksh \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_002_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_003_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_004_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_005_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_006_pos.ksh \
functional/idmap_mount/cleanup.ksh \
functional/idmap_mount/setup.ksh \
functional/idmap_mount/idmap_mount_001.ksh \
functional/idmap_mount/idmap_mount_002.ksh \
functional/idmap_mount/idmap_mount_003.ksh \
functional/idmap_mount/idmap_mount_004.ksh \
functional/idmap_mount/idmap_mount_005.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/direct/dio_loopback_dev.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/direct/dio_loopback_dev.ksh
new file mode 100755
index 000000000000..7186eba5aafc
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/direct/dio_loopback_dev.ksh
@@ -0,0 +1,78 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025 by Triad National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/direct/dio.cfg
+. $STF_SUITE/tests/functional/direct/dio.kshlib
+
+#
+# DESCRIPTION:
+# Verify Direct I/O reads work with loopback devices using direct=always.
+#
+# STRATEGY:
+# 1. Create raidz zpool.
+# 2. Create dataset with the direct dataset property set to always.
+# 3. Create an empty file in dataset and setup loop device on it.
+# 4. Read from loopback device.
+#
+
+verify_runnable "global"
+
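+# Detach the loopback device (if one was attached) before the common Direct I/O cleanup.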
+function cleanup
+{
+ if [[ -n $lofidev ]]; then
+ losetup -d $lofidev
+ fi
+ dio_cleanup
+}
+
+log_assert "Verify loopback devices with Direct I/O."
+
+if ! is_linux; then
+ log_unsupported "Loopback devices via losetup are only supported on Linux"
+fi
+
+log_onexit cleanup
+
+# Create zpool
+log_must truncate -s $MINVDEVSIZE $DIO_VDEVS
+log_must create_pool $TESTPOOL1 "raidz" $DIO_VDEVS
+
+# Create a dataset with direct=always
+log_must eval "zfs create -o direct=always $TESTPOOL1/$TESTFS1"
+mntpt=$(get_prop mountpoint $TESTPOOL1/$TESTFS1)
+
+# Find a free loopback device
+lofidev=$(losetup -f)
+
+# Create a backing file in the dataset and attach it to the loopback device
+log_must truncate -s 1M "$mntpt/temp_file"
+log_must losetup $lofidev "$mntpt/temp_file"
+
+# Read from the loopback device to make sure Direct I/O works through it
+log_must dd if=$lofidev of=/dev/null count=1 bs=4k
+
+log_pass "Verified loopback devices for Direct I/O."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh
index bfb98cd30707..5bb6e518edb0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh
@@ -1,30 +1,31 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
. $STF_SUITE/tests/functional/pam/utilities.kshlib
rmconfig
destroy_pool $TESTPOOL
del_user ${username}
del_user ${username}rec
+del_user ${username}mrec
del_group pamtestgroup
log_must rm -rf "$runstatedir"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_mount_recursively.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_mount_recursively.ksh
new file mode 100755
index 000000000000..93683da7d7db
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_mount_recursively.ksh
@@ -0,0 +1,90 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/tests/functional/pam/utilities.kshlib
+
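+# If ASAN is in use, preload libasan (found by ldd on the zfs binary) so that
+# the ASAN-built pam_zfs_key module can be loaded by pamtester.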
+if [ -n "$ASAN_OPTIONS" ]; then
+ export LD_PRELOAD=$(ldd "$(command -v zfs)" | awk '/libasan\.so/ {print $3}')
+fi
+
+username="${username}mrec"
+
+# Set up a deeper hierarchy, a mountpoint that doesn't interfere with other tests,
+# and a user which references that mountpoint
+log_must zfs create "$TESTPOOL/mrec"
+log_must zfs create -o mountpoint="$TESTDIR/mrec" "$TESTPOOL/mrec/pam"
+echo "recurpass" | zfs create -o encryption=aes-256-gcm -o keyformat=passphrase \
+ -o keylocation=prompt "$TESTPOOL/mrec/pam/${username}"
+log_must zfs create "$TESTPOOL/mrec/pam/${username}/deep"
+log_must zfs create "$TESTPOOL/mrec/pam/${username}/deep/deeper"
+log_must zfs create -o mountpoint=none "$TESTPOOL/mrec/pam/${username}/deep/none"
+log_must zfs create -o canmount=noauto "$TESTPOOL/mrec/pam/${username}/deep/noauto"
+log_must zfs create -o canmount=off "$TESTPOOL/mrec/pam/${username}/deep/off"
+log_must zfs unmount "$TESTPOOL/mrec/pam/${username}"
+log_must zfs unload-key "$TESTPOOL/mrec/pam/${username}"
+log_must add_user pamtestgroup ${username} "$TESTDIR/mrec"
+
+function keystatus {
+ log_must [ "$(get_prop keystatus "$TESTPOOL/mrec/pam/${username}")" = "$1" ]
+}
+
+log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}"
+keystatus unavailable
+
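+# Open and close a PAM session for the user and check that the mountable
+# descendants are mounted and unmounted with it, that the none/noauto/off
+# children stay unmounted, and that the wrapping key is loaded and unloaded
+# alongside the session.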
+function test_session {
+ echo "recurpass" | pamtester ${pamservice} ${username} open_session
+ references 1
+ log_must ismounted "$TESTPOOL/mrec/pam/${username}"
+ log_must ismounted "$TESTPOOL/mrec/pam/${username}/deep"
+ log_must ismounted "$TESTPOOL/mrec/pam/${username}/deep/deeper"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/none"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/noauto"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/off"
+ keystatus available
+
+ log_must pamtester ${pamservice} ${username} close_session
+ references 0
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/deeper"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/none"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/noauto"
+ log_mustnot ismounted "$TESTPOOL/mrec/pam/${username}/deep/off"
+ keystatus unavailable
+}
+
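+# Run the same session checks with mount_recursively against progressively
+# broader "homes" settings, from the immediate parent of the encrypted home
+# up to a wildcard.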
+genconfig "homes=$TESTPOOL/mrec/pam mount_recursively runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=$TESTPOOL/mrec/pam prop_mountpoint mount_recursively runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=$TESTPOOL/mrec recursive_homes prop_mountpoint mount_recursively runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=$TESTPOOL recursive_homes prop_mountpoint mount_recursively runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=* recursive_homes prop_mountpoint mount_recursively runstatedir=${runstatedir}"
+test_session
+
+log_pass "done."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_hierarchy.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_hierarchy.ksh
index 8417afc88d33..6dd4ae46f947 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_hierarchy.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_hierarchy.ksh
@@ -1,96 +1,113 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 by Datto Inc. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
#
# DESCRIPTION:
# Raw recursive sends preserve filesystem structure.
#
# STRATEGY:
# 1. Create an encrypted filesystem with a clone and a child
# 2. Snapshot and send the filesystem tree
# 3. Verify that the filesystem structure was correctly received
# 4. Change the child to an encryption root and promote the clone
# 5. Snapshot and send the filesystem tree again
# 6. Verify that the new structure is received correctly
#
verify_runnable "both"
function cleanup
{
log_must cleanup_pool $POOL
log_must cleanup_pool $POOL2
log_must setup_test_model $POOL
}
log_assert "Raw recursive sends preserve filesystem structure."
log_onexit cleanup
# Create the filesystem hierarchy
log_must cleanup_pool $POOL
log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
"-o keyformat=passphrase $POOL/$FS"
log_must zfs snapshot $POOL/$FS@snap
log_must zfs clone $POOL/$FS@snap $POOL/clone
log_must zfs create $POOL/$FS/child
# Back up the tree and verify the structure
log_must zfs snapshot -r $POOL@before
log_must eval "zfs send -wR $POOL@before > $BACKDIR/fs-before-R"
log_must eval "zfs receive -d -F $POOL2 < $BACKDIR/fs-before-R"
dstds=$(get_dst_ds $POOL/$FS $POOL2)
log_must cmp_ds_subs $POOL/$FS $dstds
-log_must verify_encryption_root $POOL/$FS $POOL/$FS
-log_must verify_keylocation $POOL/$FS "prompt"
-log_must verify_origin $POOL/$FS "-"
+log_must verify_encryption_root $POOL2/$FS $POOL2/$FS
+log_must verify_keylocation $POOL2/$FS "prompt"
+log_must verify_origin $POOL2/$FS "-"
-log_must verify_encryption_root $POOL/clone $POOL/$FS
-log_must verify_keylocation $POOL/clone "none"
-log_must verify_origin $POOL/clone "$POOL/$FS@snap"
+log_must verify_encryption_root $POOL2/clone $POOL2/$FS
+log_must verify_keylocation $POOL2/clone "none"
+log_must verify_origin $POOL2/clone "$POOL2/$FS@snap"
log_must verify_encryption_root $POOL/$FS/child $POOL/$FS
-log_must verify_keylocation $POOL/$FS/child "none"
+log_must verify_encryption_root $POOL2/$FS/child $POOL2/$FS
+log_must verify_keylocation $POOL2/$FS/child "none"
# Alter the hierarchy and re-send
log_must eval "echo $PASSPHRASE1 | zfs change-key -o keyformat=passphrase" \
"$POOL/$FS/child"
log_must zfs promote $POOL/clone
log_must zfs snapshot -r $POOL@after
log_must eval "zfs send -wR -i $POOL@before $POOL@after >" \
"$BACKDIR/fs-after-R"
log_must eval "zfs receive -d -F $POOL2 < $BACKDIR/fs-after-R"
log_must cmp_ds_subs $POOL/$FS $dstds
log_must verify_encryption_root $POOL/$FS $POOL/clone
log_must verify_keylocation $POOL/$FS "none"
log_must verify_origin $POOL/$FS "$POOL/clone@snap"
log_must verify_encryption_root $POOL/clone $POOL/clone
log_must verify_keylocation $POOL/clone "prompt"
log_must verify_origin $POOL/clone "-"
log_must verify_encryption_root $POOL/$FS/child $POOL/$FS/child
log_must verify_keylocation $POOL/$FS/child "prompt"
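+# The received copy on $POOL2 must reflect the promoted layout: the clone is
+# now its own encryption root, $FS inherits from it, and $FS/child has become
+# an independent encryption root.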
+log_must verify_encryption_root $POOL2 "-"
+log_must verify_encryption_root $POOL2/clone $POOL2/clone
+log_must verify_encryption_root $POOL2/$FS $POOL2/clone
+log_must verify_encryption_root $POOL2/$FS/child $POOL2/$FS/child
+
+log_must verify_keylocation $POOL2 "none"
+log_must verify_keylocation $POOL2/clone "prompt"
+log_must verify_keylocation $POOL2/$FS "none"
+log_must verify_keylocation $POOL2/$FS/child "prompt"
+
+log_must verify_origin $POOL2 "-"
+log_must verify_origin $POOL2/clone "-"
+log_must verify_origin $POOL2/$FS "$POOL2/clone@snap"
+log_must verify_origin $POOL2/$FS/child "-"
+log_must zfs list
+
log_pass "Raw recursive sends preserve filesystem structure."
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index b6cb1ab0652e..fcf5949c8ca6 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,826 +1,832 @@
/*
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* __assign_str() has one arg */
/* #undef HAVE_1ARG_ASSIGN_STR */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* add_disk() returns int */
/* #undef HAVE_ADD_DISK_RET */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
/* Define if you have [rt] */
#define HAVE_AIO_H 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* backtrace() is available */
/* #undef HAVE_BACKTRACE */
/* bdevname() is available */
/* #undef HAVE_BDEVNAME */
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_file_open_by_path() exists */
/* #undef HAVE_BDEV_FILE_OPEN_BY_PATH */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_63 */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_OLD */
/* bdev_kobj() exists */
/* #undef HAVE_BDEV_KOBJ */
/* bdev_max_discard_sectors() is available */
/* #undef HAVE_BDEV_MAX_DISCARD_SECTORS */
/* bdev_max_secure_erase_sectors() is available */
/* #undef HAVE_BDEV_MAX_SECURE_ERASE_SECTORS */
/* bdev_nr_bytes() is available */
/* #undef HAVE_BDEV_NR_BYTES */
/* bdev_open_by_path() exists */
/* #undef HAVE_BDEV_OPEN_BY_PATH */
/* bdev_release() exists */
/* #undef HAVE_BDEV_RELEASE */
/* block_device_operations->submit_bio() returns void */
/* #undef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio_alloc() takes 4 arguments */
/* #undef HAVE_BIO_ALLOC_4ARG */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_dev() is a macro */
/* #undef HAVE_BIO_SET_DEV_MACRO */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_get_by_path() exists and takes 4 args */
/* #undef HAVE_BLKDEV_GET_BY_PATH_4ARG */
/* blkdev_get_by_path() handles ERESTARTSYS */
/* #undef HAVE_BLKDEV_GET_ERESTARTSYS */
/* __blkdev_issue_discard(flags) is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD_ASYNC_FLAGS */
/* __blkdev_issue_discard() is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD_ASYNC_NOFLAGS */
/* blkdev_issue_discard(flags) is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD_FLAGS */
/* blkdev_issue_discard() is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD_NOFLAGS */
/* blkdev_issue_secure_erase() is available */
/* #undef HAVE_BLKDEV_ISSUE_SECURE_ERASE */
/* blkdev_put() exists */
/* #undef HAVE_BLKDEV_PUT */
/* blkdev_put() accepts void* as arg 2 */
/* #undef HAVE_BLKDEV_PUT_HOLDER */
/* struct queue_limits has a features field */
/* #undef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_disk() exists and takes 2 args */
/* #undef HAVE_BLK_ALLOC_DISK_2ARG */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk_cleanup_disk() exists */
/* #undef HAVE_BLK_CLEANUP_DISK */
/* blk_mode_t is defined */
/* #undef HAVE_BLK_MODE_T */
/* block multiqueue hardware context is cached in struct request */
/* #undef HAVE_BLK_MQ_RQ_HCTX */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_discard() is available */
/* #undef HAVE_BLK_QUEUE_DISCARD */
/* backing_dev_info is available through queue gendisk */
/* #undef HAVE_BLK_QUEUE_DISK_BDI */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_update_readahead() exists */
/* #undef HAVE_BLK_QUEUE_UPDATE_READAHEAD */
/* BLK_STS_RESV_CONFLICT is defined */
/* #undef HAVE_BLK_STS_RESV_CONFLICT */
/* Define if release() in block_device_operations takes 1 arg */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* copy_splice_read exists */
/* #undef HAVE_COPY_SPLICE_READ */
/* cpu_has_feature() is GPL-only */
/* #undef HAVE_CPU_HAS_FEATURE_GPL_ONLY */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* 3-arg dequeue_signal() takes a type argument */
/* #undef HAVE_DEQUEUE_SIGNAL_3ARG_TYPE */
/* dequeue_signal() takes 4 arguments */
/* #undef HAVE_DEQUEUE_SIGNAL_4ARG */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* disk_check_media_change() exists */
/* #undef HAVE_DISK_CHECK_MEDIA_CHANGE */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* disk_update_readahead() exists */
/* #undef HAVE_DISK_UPDATE_READAHEAD */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
+/* dops->d_revalidate() takes 4 args */
+/* #undef HAVE_D_REVALIDATE_4ARGS */
+
/* Define to 1 if you have the 'execvpe' function. */
#define HAVE_EXECVPE 1
/* fault_in_iov_iter_readable() is available */
/* #undef HAVE_FAULT_IN_IOV_ITER_READABLE */
/* file->f_version exists */
/* #undef HAVE_FILE_F_VERSION */
/* flush_dcache_page() is GPL-only */
/* #undef HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY */
/* Define if compiler supports -Wformat-overflow */
/* #undef HAVE_FORMAT_OVERFLOW */
/* fsync_bdev() is declared in include/blkdev.h */
/* #undef HAVE_FSYNC_BDEV */
/* yes */
/* #undef HAVE_GENERIC_FADVISE */
/* generic_fillattr requires struct mnt_idmap* */
/* #undef HAVE_GENERIC_FILLATTR_IDMAP */
/* generic_fillattr requires struct mnt_idmap* and u32 request_mask */
/* #undef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* GENHD_FL_EXT_DEVT flag is available */
/* #undef HAVE_GENHD_FL_EXT_DEVT */
/* GENHD_FL_NO_PART flag is available */
/* #undef HAVE_GENHD_FL_NO_PART */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* Define to 1 if you have the 'gettid' function. */
/* #undef HAVE_GETTID */
/* iops->get_acl() exists */
/* #undef HAVE_GET_ACL */
/* iops->get_acl() takes rcu */
/* #undef HAVE_GET_ACL_RCU */
/* has iops->get_inode_acl() */
/* #undef HAVE_GET_INODE_ACL */
/* iattr->ia_vfsuid and iattr->ia_vfsgid exist */
/* #undef HAVE_IATTR_VFSID */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* iops->getattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_GETATTR */
/* iops->setattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_SETATTR */
/* APIs for idmapped mount are present */
/* #undef HAVE_IDMAP_MNT_API */
/* mnt_idmap does not have user_namespace */
/* #undef HAVE_IDMAP_NO_USERNS */
/* Define if compiler supports -Wimplicit-fallthrough */
/* #undef HAVE_IMPLICIT_FALLTHROUGH */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_INFINITE_RECURSION */
/* inode_get_atime() exists in linux/fs.h */
/* #undef HAVE_INODE_GET_ATIME */
/* inode_get_ctime() exists in linux/fs.h */
/* #undef HAVE_INODE_GET_CTIME */
/* inode_get_mtime() exists in linux/fs.h */
/* #undef HAVE_INODE_GET_MTIME */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes mnt_idmap */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAP */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_USERNS */
/* inode_set_atime_to_ts() exists in linux/fs.h */
/* #undef HAVE_INODE_SET_ATIME_TO_TS */
/* inode_set_ctime_to_ts() exists in linux/fs.h */
/* #undef HAVE_INODE_SET_CTIME_TO_TS */
/* inode_set_mtime_to_ts() exists in linux/fs.h */
/* #undef HAVE_INODE_SET_MTIME_TO_TS */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* iops->create() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_CREATE_IDMAP */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKDIR_IDMAP */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKNOD_IDMAP */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->permission() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_PERMISSION_IDMAP */
/* iops->permission() takes struct user_namespace* */
/* #undef HAVE_IOPS_PERMISSION_USERNS */
/* iops->rename() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_RENAME_IDMAP */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->symlink() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_SYMLINK_IDMAP */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
+/* iov_iter_get_pages2() is available */
+/* #undef HAVE_IOV_ITER_GET_PAGES2 */
+
/* iov_iter_type() is available */
/* #undef HAVE_IOV_ITER_TYPE */
/* Define to 1 if you have the 'issetugid' function. */
#define HAVE_ISSETUGID 1
/* iter_iov() is available */
/* #undef HAVE_ITER_IOV */
/* iter_is_ubuf() is available */
/* #undef HAVE_ITER_IS_UBUF */
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* kernel has asm/fpu/internal.h */
/* #undef HAVE_KERNEL_FPU_INTERNAL_HEADER */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_KERNEL_INFINITE_RECURSION */
/* kernel defines intptr_t */
/* #undef HAVE_KERNEL_INTPTR_T */
/* kernel has kernel_neon_* functions */
/* #undef HAVE_KERNEL_NEON */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* strlcpy() exists */
/* #undef HAVE_KERNEL_STRLCPY */
/* kernel has kmap_local_page */
/* #undef HAVE_KMAP_LOCAL_PAGE */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [unwind] */
/* #undef HAVE_LIBUNWIND */
/* libunwind has unw_get_elf_filename */
/* #undef HAVE_LIBUNWIND_ELF */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* building against unsupported kernel version */
/* #undef HAVE_LINUX_EXPERIMENTAL */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Define to 1 if you have the 'mlockall' function. */
#define HAVE_MLOCKALL 1
/* PG_error flag is available */
/* #undef HAVE_MM_PAGE_FLAG_ERROR */
/* page_mapping() is available */
/* #undef HAVE_MM_PAGE_MAPPING */
/* page_size() is available */
/* #undef HAVE_MM_PAGE_SIZE */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* folio_wait_bit() exists */
/* #undef HAVE_PAGEMAP_FOLIO_WAIT_BIT */
/* part_to_dev() exists */
/* #undef HAVE_PART_TO_DEV */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* pin_user_pages_unlocked() is available */
/* #undef HAVE_PIN_USER_PAGES_UNLOCKED */
/* proc_handler ctl_table arg is const */
/* #undef HAVE_PROC_HANDLER_CTL_TABLE_CONST */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and existed */
/* #undef HAVE_QAT */
/* struct reclaim_state has reclaimed */
/* #undef HAVE_RECLAIM_STATE_RECLAIMED */
/* register_shrinker is vararg */
/* #undef HAVE_REGISTER_SHRINKER_VARARG */
/* register_sysctl_sz exists */
/* #undef HAVE_REGISTER_SYSCTL_SZ */
/* register_sysctl_table exists */
/* #undef HAVE_REGISTER_SYSCTL_TABLE */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() accepts mnt_idmap */
/* #undef HAVE_SETATTR_PREPARE_IDMAP */
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() takes 4 args, arg1 is struct mnt_idmap * */
/* #undef HAVE_SET_ACL_IDMAP_DENTRY */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* iops->set_acl() takes 4 args, arg2 is struct dentry * */
/* #undef HAVE_SET_ACL_USERNS_DENTRY_ARG2 */
/* shrinker_register exists */
/* #undef HAVE_SHRINKER_REGISTER */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* standalone <linux/stdarg.h> exists */
/* #undef HAVE_STANDALONE_LINUX_STDARG */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the 'strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the 'strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* have super_block s_shrink */
/* #undef HAVE_SUPER_BLOCK_S_SHRINK */
/* have super_block s_shrink pointer */
/* #undef HAVE_SUPER_BLOCK_S_SHRINK_PTR */
/* sync_blockdev() is declared in include/blkdev.h */
/* #undef HAVE_SYNC_BLOCKDEV */
/* struct kobj_type has default_groups */
/* #undef HAVE_SYSFS_DEFAULT_GROUPS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() uses old dentry signature */
/* #undef HAVE_TMPFILE_DENTRY */
/* i_op->tmpfile() has mnt_idmap */
/* #undef HAVE_TMPFILE_IDMAP */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the 'udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->setattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_SETATTR */
/* fops->clone_file_range() is available */
/* #undef HAVE_VFS_CLONE_FILE_RANGE */
/* fops->dedupe_file_range() is available */
/* #undef HAVE_VFS_DEDUPE_FILE_RANGE */
/* filemap_dirty_folio exists */
/* #undef HAVE_VFS_FILEMAP_DIRTY_FOLIO */
/* generic_copy_file_range() is available */
/* #undef HAVE_VFS_GENERIC_COPY_FILE_RANGE */
/* migrate_folio exists */
/* #undef HAVE_VFS_MIGRATE_FOLIO */
/* address_space_operations->readpages exists */
/* #undef HAVE_VFS_READPAGES */
/* read_folio exists */
/* #undef HAVE_VFS_READ_FOLIO */
/* fops->remap_file_range() is available */
/* #undef HAVE_VFS_REMAP_FILE_RANGE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* splice_copy_file_range() is available */
/* #undef HAVE_VFS_SPLICE_COPY_FILE_RANGE */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* int (*writepage_t)() takes struct folio* */
/* #undef HAVE_WRITEPAGE_T_FOLIO */
/* xattr_handler->get() wants dentry and inode and flags */
/* #undef HAVE_XATTR_GET_DENTRY_INODE_FLAGS */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() takes mnt_idmap */
/* #undef HAVE_XATTR_SET_IDMAP */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define if host toolchain supports XSAVE */
#define HAVE_XSAVE 1
/* Define if host toolchain supports XSAVEOPT */
#define HAVE_XSAVEOPT 1
/* Define if host toolchain supports XSAVES */
#define HAVE_XSAVES 1
/* ZERO_PAGE() is GPL-only */
/* #undef HAVE_ZERO_PAGE_GPL_ONLY */
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* using complete_and_exit() instead */
/* #undef SPL_KTHREAD_COMPLETE_AND_EXIT */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* pde_data() is PDE_DATA() */
/* #undef SPL_PDE_DATA */
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.3.99-170-FreeBSD_g34205715e"
+#define ZFS_META_ALIAS "zfs-2.3.99-189-FreeBSD_g6a2f7b384"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
#define ZFS_META_KVER_MAX "6.12"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "4.18"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "170-FreeBSD_g34205715e"
+#define ZFS_META_RELEASE "189-FreeBSD_g6a2f7b384"
/* Define the project version. */
#define ZFS_META_VERSION "2.3.99"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
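
For context, the HAVE_* probes recorded above are consumed through ordinary conditional compilation: a translation unit includes the generated config header first and then picks between a native interface and a fallback. The following is a minimal sketch, not code from the OpenZFS tree; the copy_name() helper and the bare "zfs_config.h" include path are hypothetical and only illustrate the pattern around HAVE_STRLCPY.

/*
 * Minimal sketch (hypothetical, not from the OpenZFS sources) of how a
 * HAVE_* feature macro from the config header typically gates code.
 */
#include "zfs_config.h"	/* hypothetical include path for this sketch */

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Copy src into dst, preferring the native strlcpy() when it was detected. */
static size_t
copy_name(char *dst, const char *src, size_t dstsize)
{
#ifdef HAVE_STRLCPY
	return (strlcpy(dst, src, dstsize));
#else
	/* Portable fallback when strlcpy() was not detected. */
	size_t len = strlen(src);

	if (dstsize != 0) {
		size_t n = (len >= dstsize) ? dstsize - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return (len);
#endif
}

int
main(void)
{
	char buf[16];

	(void) copy_name(buf, "zroot/usr/home", sizeof (buf));
	(void) printf("%s\n", buf);
	return (0);
}

Built this way, the same source compiles whether or not a given interface was detected, which is why the header records nothing more than a define or an #undef for each probe.
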
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 0a2a21e6ddf1..d6c39ea2840a 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.3.99-170-g34205715e"
+#define ZFS_META_GITREV "zfs-2.3.99-189-g6a2f7b384"
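
The bump from -170-g34205715e to -189-g6a2f7b384 keeps zfs_gitrev.h in step with the ZFS_META_* macros updated above. As a worked check, the alias string is the version and release concatenated; the rule below is inferred from the values shown in this diff, not quoted from the OpenZFS build machinery.

/*
 * Sketch showing how the version strings in this diff relate to each
 * other. The concatenation is inferred from the values above.
 */
#include <stdio.h>

#define ZFS_META_VERSION	"2.3.99"
#define ZFS_META_RELEASE	"189-FreeBSD_g6a2f7b384"
#define ZFS_META_ALIAS		"zfs-" ZFS_META_VERSION "-" ZFS_META_RELEASE
#define ZFS_META_GITREV		"zfs-2.3.99-189-g6a2f7b384"

int
main(void)
{
	/* Prints: zfs-2.3.99-189-FreeBSD_g6a2f7b384 */
	(void) printf("%s\n", ZFS_META_ALIAS);
	/* The module's git revision names the matching commit, g6a2f7b384. */
	(void) printf("%s\n", ZFS_META_GITREV);
	return (0);
}
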
